Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'drm-header-fixes' of https://github.com/GabrielL/linux into drm-next

Fix all the problems with the header files and userspace builds
off them. I really care so little about this, but hey who am
I to stop progress.

* 'drm-header-fixes' of https://github.com/GabrielL/linux: (30 commits)
drm: fix inclusion of drm.h in via_drm.h
drm: fix inclusion of drm.h in vmwgfx_drm.h
drm: fix inclusion of drm.h in virtgpu_drm.h
drm: fix inclusion of drm.h in tegra_drm.h
drm: fix inclusion of drm.h in savage_drm.h
drm: fix inclusion of drm.h in r128_drm.h
drm: fix inclusion of drm.h in qxl_drm.h
drm: fix inclusion of drm.h in omap_drm.h
drm: fix inclusion of drm.h in msm_drm.h
drm: fix inclusion of drm.h in mga_drm.h
drm: fix inclusion of drm.h in exynos_sarea.h
drm: fix inclusion of drm.h in i810_drm.h
drm: fix inclusion of drm.h in exynos_sarea.h
drm: fix inclusion of drm.h in drm_sarea.h
drm: drm_mode.h fix includes
drm: drm_fourcc.h fix includes
drm: include drm.h in armada_drm.h
include/uapi/drm/amdgpu_drm.h: use __u32 and __u64 from <linux/types.h>
drm: Kbuild: add amdgpu_drm.h to the installed headers
drm: use __u{32,64} instead of uint{32,64}_t in virtgpu_drm.h
...

+508 -509
+1
include/uapi/drm/Kbuild
··· 3 3 header-y += drm_fourcc.h 4 4 header-y += drm_mode.h 5 5 header-y += drm_sarea.h 6 + header-y += amdgpu_drm.h 6 7 header-y += exynos_drm.h 7 8 header-y += i810_drm.h 8 9 header-y += i915_drm.h
+145 -145
include/uapi/drm/amdgpu_drm.h
··· 76 76 77 77 struct drm_amdgpu_gem_create_in { 78 78 /** the requested memory size */ 79 - uint64_t bo_size; 79 + __u64 bo_size; 80 80 /** physical start_addr alignment in bytes for some HW requirements */ 81 - uint64_t alignment; 81 + __u64 alignment; 82 82 /** the requested memory domains */ 83 - uint64_t domains; 83 + __u64 domains; 84 84 /** allocation flags */ 85 - uint64_t domain_flags; 85 + __u64 domain_flags; 86 86 }; 87 87 88 88 struct drm_amdgpu_gem_create_out { 89 89 /** returned GEM object handle */ 90 - uint32_t handle; 91 - uint32_t _pad; 90 + __u32 handle; 91 + __u32 _pad; 92 92 }; 93 93 94 94 union drm_amdgpu_gem_create { ··· 105 105 106 106 struct drm_amdgpu_bo_list_in { 107 107 /** Type of operation */ 108 - uint32_t operation; 108 + __u32 operation; 109 109 /** Handle of list or 0 if we want to create one */ 110 - uint32_t list_handle; 110 + __u32 list_handle; 111 111 /** Number of BOs in list */ 112 - uint32_t bo_number; 112 + __u32 bo_number; 113 113 /** Size of each element describing BO */ 114 - uint32_t bo_info_size; 114 + __u32 bo_info_size; 115 115 /** Pointer to array describing BOs */ 116 - uint64_t bo_info_ptr; 116 + __u64 bo_info_ptr; 117 117 }; 118 118 119 119 struct drm_amdgpu_bo_list_entry { 120 120 /** Handle of BO */ 121 - uint32_t bo_handle; 121 + __u32 bo_handle; 122 122 /** New (if specified) BO priority to be used during migration */ 123 - uint32_t bo_priority; 123 + __u32 bo_priority; 124 124 }; 125 125 126 126 struct drm_amdgpu_bo_list_out { 127 127 /** Handle of resource list */ 128 - uint32_t list_handle; 129 - uint32_t _pad; 128 + __u32 list_handle; 129 + __u32 _pad; 130 130 }; 131 131 132 132 union drm_amdgpu_bo_list { ··· 150 150 151 151 struct drm_amdgpu_ctx_in { 152 152 /** AMDGPU_CTX_OP_* */ 153 - uint32_t op; 153 + __u32 op; 154 154 /** For future use, no flags defined so far */ 155 - uint32_t flags; 156 - uint32_t ctx_id; 157 - uint32_t _pad; 155 + __u32 flags; 156 + __u32 ctx_id; 157 + __u32 _pad; 158 158 }; 
159 159 160 160 union drm_amdgpu_ctx_out { 161 161 struct { 162 - uint32_t ctx_id; 163 - uint32_t _pad; 162 + __u32 ctx_id; 163 + __u32 _pad; 164 164 } alloc; 165 165 166 166 struct { 167 167 /** For future use, no flags defined so far */ 168 - uint64_t flags; 168 + __u64 flags; 169 169 /** Number of resets caused by this context so far. */ 170 - uint32_t hangs; 170 + __u32 hangs; 171 171 /** Reset status since the last call of the ioctl. */ 172 - uint32_t reset_status; 172 + __u32 reset_status; 173 173 } state; 174 174 }; 175 175 ··· 189 189 #define AMDGPU_GEM_USERPTR_REGISTER (1 << 3) 190 190 191 191 struct drm_amdgpu_gem_userptr { 192 - uint64_t addr; 193 - uint64_t size; 192 + __u64 addr; 193 + __u64 size; 194 194 /* AMDGPU_GEM_USERPTR_* */ 195 - uint32_t flags; 195 + __u32 flags; 196 196 /* Resulting GEM handle */ 197 - uint32_t handle; 197 + __u32 handle; 198 198 }; 199 199 200 200 /* same meaning as the GB_TILE_MODE and GL_MACRO_TILE_MODE fields */ ··· 226 226 /** The same structure is shared for input/output */ 227 227 struct drm_amdgpu_gem_metadata { 228 228 /** GEM Object handle */ 229 - uint32_t handle; 229 + __u32 handle; 230 230 /** Do we want get or set metadata */ 231 - uint32_t op; 231 + __u32 op; 232 232 struct { 233 233 /** For future use, no flags defined so far */ 234 - uint64_t flags; 234 + __u64 flags; 235 235 /** family specific tiling info */ 236 - uint64_t tiling_info; 237 - uint32_t data_size_bytes; 238 - uint32_t data[64]; 236 + __u64 tiling_info; 237 + __u32 data_size_bytes; 238 + __u32 data[64]; 239 239 } data; 240 240 }; 241 241 242 242 struct drm_amdgpu_gem_mmap_in { 243 243 /** the GEM object handle */ 244 - uint32_t handle; 245 - uint32_t _pad; 244 + __u32 handle; 245 + __u32 _pad; 246 246 }; 247 247 248 248 struct drm_amdgpu_gem_mmap_out { 249 249 /** mmap offset from the vma offset manager */ 250 - uint64_t addr_ptr; 250 + __u64 addr_ptr; 251 251 }; 252 252 253 253 union drm_amdgpu_gem_mmap { ··· 257 257 258 258 struct 
drm_amdgpu_gem_wait_idle_in { 259 259 /** GEM object handle */ 260 - uint32_t handle; 260 + __u32 handle; 261 261 /** For future use, no flags defined so far */ 262 - uint32_t flags; 262 + __u32 flags; 263 263 /** Absolute timeout to wait */ 264 - uint64_t timeout; 264 + __u64 timeout; 265 265 }; 266 266 267 267 struct drm_amdgpu_gem_wait_idle_out { 268 268 /** BO status: 0 - BO is idle, 1 - BO is busy */ 269 - uint32_t status; 269 + __u32 status; 270 270 /** Returned current memory domain */ 271 - uint32_t domain; 271 + __u32 domain; 272 272 }; 273 273 274 274 union drm_amdgpu_gem_wait_idle { ··· 278 278 279 279 struct drm_amdgpu_wait_cs_in { 280 280 /** Command submission handle */ 281 - uint64_t handle; 281 + __u64 handle; 282 282 /** Absolute timeout to wait */ 283 - uint64_t timeout; 284 - uint32_t ip_type; 285 - uint32_t ip_instance; 286 - uint32_t ring; 287 - uint32_t ctx_id; 283 + __u64 timeout; 284 + __u32 ip_type; 285 + __u32 ip_instance; 286 + __u32 ring; 287 + __u32 ctx_id; 288 288 }; 289 289 290 290 struct drm_amdgpu_wait_cs_out { 291 291 /** CS status: 0 - CS completed, 1 - CS still busy */ 292 - uint64_t status; 292 + __u64 status; 293 293 }; 294 294 295 295 union drm_amdgpu_wait_cs { ··· 303 303 /* Sets or returns a value associated with a buffer. */ 304 304 struct drm_amdgpu_gem_op { 305 305 /** GEM object handle */ 306 - uint32_t handle; 306 + __u32 handle; 307 307 /** AMDGPU_GEM_OP_* */ 308 - uint32_t op; 308 + __u32 op; 309 309 /** Input or return value */ 310 - uint64_t value; 310 + __u64 value; 311 311 }; 312 312 313 313 #define AMDGPU_VA_OP_MAP 1 ··· 326 326 327 327 struct drm_amdgpu_gem_va { 328 328 /** GEM object handle */ 329 - uint32_t handle; 330 - uint32_t _pad; 329 + __u32 handle; 330 + __u32 _pad; 331 331 /** AMDGPU_VA_OP_* */ 332 - uint32_t operation; 332 + __u32 operation; 333 333 /** AMDGPU_VM_PAGE_* */ 334 - uint32_t flags; 334 + __u32 flags; 335 335 /** va address to assign . 
Must be correctly aligned.*/ 336 - uint64_t va_address; 336 + __u64 va_address; 337 337 /** Specify offset inside of BO to assign. Must be correctly aligned.*/ 338 - uint64_t offset_in_bo; 338 + __u64 offset_in_bo; 339 339 /** Specify mapping size. Must be correctly aligned. */ 340 - uint64_t map_size; 340 + __u64 map_size; 341 341 }; 342 342 343 343 #define AMDGPU_HW_IP_GFX 0 ··· 354 354 #define AMDGPU_CHUNK_ID_DEPENDENCIES 0x03 355 355 356 356 struct drm_amdgpu_cs_chunk { 357 - uint32_t chunk_id; 358 - uint32_t length_dw; 359 - uint64_t chunk_data; 357 + __u32 chunk_id; 358 + __u32 length_dw; 359 + __u64 chunk_data; 360 360 }; 361 361 362 362 struct drm_amdgpu_cs_in { 363 363 /** Rendering context id */ 364 - uint32_t ctx_id; 364 + __u32 ctx_id; 365 365 /** Handle of resource list associated with CS */ 366 - uint32_t bo_list_handle; 367 - uint32_t num_chunks; 368 - uint32_t _pad; 369 - /** this points to uint64_t * which point to cs chunks */ 370 - uint64_t chunks; 366 + __u32 bo_list_handle; 367 + __u32 num_chunks; 368 + __u32 _pad; 369 + /** this points to __u64 * which point to cs chunks */ 370 + __u64 chunks; 371 371 }; 372 372 373 373 struct drm_amdgpu_cs_out { 374 - uint64_t handle; 374 + __u64 handle; 375 375 }; 376 376 377 377 union drm_amdgpu_cs { ··· 388 388 #define AMDGPU_IB_FLAG_PREAMBLE (1<<1) 389 389 390 390 struct drm_amdgpu_cs_chunk_ib { 391 - uint32_t _pad; 391 + __u32 _pad; 392 392 /** AMDGPU_IB_FLAG_* */ 393 - uint32_t flags; 393 + __u32 flags; 394 394 /** Virtual address to begin IB execution */ 395 - uint64_t va_start; 395 + __u64 va_start; 396 396 /** Size of submission */ 397 - uint32_t ib_bytes; 397 + __u32 ib_bytes; 398 398 /** HW IP to submit to */ 399 - uint32_t ip_type; 399 + __u32 ip_type; 400 400 /** HW IP index of the same type to submit to */ 401 - uint32_t ip_instance; 401 + __u32 ip_instance; 402 402 /** Ring index to submit to */ 403 - uint32_t ring; 403 + __u32 ring; 404 404 }; 405 405 406 406 struct drm_amdgpu_cs_chunk_dep { 
407 - uint32_t ip_type; 408 - uint32_t ip_instance; 409 - uint32_t ring; 410 - uint32_t ctx_id; 411 - uint64_t handle; 407 + __u32 ip_type; 408 + __u32 ip_instance; 409 + __u32 ring; 410 + __u32 ctx_id; 411 + __u64 handle; 412 412 }; 413 413 414 414 struct drm_amdgpu_cs_chunk_fence { 415 - uint32_t handle; 416 - uint32_t offset; 415 + __u32 handle; 416 + __u32 offset; 417 417 }; 418 418 419 419 struct drm_amdgpu_cs_chunk_data { ··· 486 486 /* Input structure for the INFO ioctl */ 487 487 struct drm_amdgpu_info { 488 488 /* Where the return value will be stored */ 489 - uint64_t return_pointer; 489 + __u64 return_pointer; 490 490 /* The size of the return value. Just like "size" in "snprintf", 491 491 * it limits how many bytes the kernel can write. */ 492 - uint32_t return_size; 492 + __u32 return_size; 493 493 /* The query request id. */ 494 - uint32_t query; 494 + __u32 query; 495 495 496 496 union { 497 497 struct { 498 - uint32_t id; 499 - uint32_t _pad; 498 + __u32 id; 499 + __u32 _pad; 500 500 } mode_crtc; 501 501 502 502 struct { 503 503 /** AMDGPU_HW_IP_* */ 504 - uint32_t type; 504 + __u32 type; 505 505 /** 506 506 * Index of the IP if there are more IPs of the same 507 507 * type. Ignored by AMDGPU_INFO_HW_IP_COUNT. 508 508 */ 509 - uint32_t ip_instance; 509 + __u32 ip_instance; 510 510 } query_hw_ip; 511 511 512 512 struct { 513 - uint32_t dword_offset; 513 + __u32 dword_offset; 514 514 /** number of registers to read */ 515 - uint32_t count; 516 - uint32_t instance; 515 + __u32 count; 516 + __u32 instance; 517 517 /** For future use, no flags defined so far */ 518 - uint32_t flags; 518 + __u32 flags; 519 519 } read_mmr_reg; 520 520 521 521 struct { 522 522 /** AMDGPU_INFO_FW_* */ 523 - uint32_t fw_type; 523 + __u32 fw_type; 524 524 /** 525 525 * Index of the IP if there are more IPs of 526 526 * the same type. 527 527 */ 528 - uint32_t ip_instance; 528 + __u32 ip_instance; 529 529 /** 530 530 * Index of the engine. 
Whether this is used depends 531 531 * on the firmware type. (e.g. MEC, SDMA) 532 532 */ 533 - uint32_t index; 534 - uint32_t _pad; 533 + __u32 index; 534 + __u32 _pad; 535 535 } query_fw; 536 536 }; 537 537 }; 538 538 539 539 struct drm_amdgpu_info_gds { 540 540 /** GDS GFX partition size */ 541 - uint32_t gds_gfx_partition_size; 541 + __u32 gds_gfx_partition_size; 542 542 /** GDS compute partition size */ 543 - uint32_t compute_partition_size; 543 + __u32 compute_partition_size; 544 544 /** total GDS memory size */ 545 - uint32_t gds_total_size; 545 + __u32 gds_total_size; 546 546 /** GWS size per GFX partition */ 547 - uint32_t gws_per_gfx_partition; 547 + __u32 gws_per_gfx_partition; 548 548 /** GSW size per compute partition */ 549 - uint32_t gws_per_compute_partition; 549 + __u32 gws_per_compute_partition; 550 550 /** OA size per GFX partition */ 551 - uint32_t oa_per_gfx_partition; 551 + __u32 oa_per_gfx_partition; 552 552 /** OA size per compute partition */ 553 - uint32_t oa_per_compute_partition; 554 - uint32_t _pad; 553 + __u32 oa_per_compute_partition; 554 + __u32 _pad; 555 555 }; 556 556 557 557 struct drm_amdgpu_info_vram_gtt { 558 - uint64_t vram_size; 559 - uint64_t vram_cpu_accessible_size; 560 - uint64_t gtt_size; 558 + __u64 vram_size; 559 + __u64 vram_cpu_accessible_size; 560 + __u64 gtt_size; 561 561 }; 562 562 563 563 struct drm_amdgpu_info_firmware { 564 - uint32_t ver; 565 - uint32_t feature; 564 + __u32 ver; 565 + __u32 feature; 566 566 }; 567 567 568 568 #define AMDGPU_VRAM_TYPE_UNKNOWN 0 ··· 576 576 577 577 struct drm_amdgpu_info_device { 578 578 /** PCI Device ID */ 579 - uint32_t device_id; 579 + __u32 device_id; 580 580 /** Internal chip revision: A0, A1, etc.) 
*/ 581 - uint32_t chip_rev; 582 - uint32_t external_rev; 581 + __u32 chip_rev; 582 + __u32 external_rev; 583 583 /** Revision id in PCI Config space */ 584 - uint32_t pci_rev; 585 - uint32_t family; 586 - uint32_t num_shader_engines; 587 - uint32_t num_shader_arrays_per_engine; 584 + __u32 pci_rev; 585 + __u32 family; 586 + __u32 num_shader_engines; 587 + __u32 num_shader_arrays_per_engine; 588 588 /* in KHz */ 589 - uint32_t gpu_counter_freq; 590 - uint64_t max_engine_clock; 591 - uint64_t max_memory_clock; 589 + __u32 gpu_counter_freq; 590 + __u64 max_engine_clock; 591 + __u64 max_memory_clock; 592 592 /* cu information */ 593 - uint32_t cu_active_number; 594 - uint32_t cu_ao_mask; 595 - uint32_t cu_bitmap[4][4]; 593 + __u32 cu_active_number; 594 + __u32 cu_ao_mask; 595 + __u32 cu_bitmap[4][4]; 596 596 /** Render backend pipe mask. One render backend is CB+DB. */ 597 - uint32_t enabled_rb_pipes_mask; 598 - uint32_t num_rb_pipes; 599 - uint32_t num_hw_gfx_contexts; 600 - uint32_t _pad; 601 - uint64_t ids_flags; 597 + __u32 enabled_rb_pipes_mask; 598 + __u32 num_rb_pipes; 599 + __u32 num_hw_gfx_contexts; 600 + __u32 _pad; 601 + __u64 ids_flags; 602 602 /** Starting virtual address for UMDs. */ 603 - uint64_t virtual_address_offset; 603 + __u64 virtual_address_offset; 604 604 /** The maximum virtual address */ 605 - uint64_t virtual_address_max; 605 + __u64 virtual_address_max; 606 606 /** Required alignment of virtual addresses. 
*/ 607 - uint32_t virtual_address_alignment; 607 + __u32 virtual_address_alignment; 608 608 /** Page table entry - fragment size */ 609 - uint32_t pte_fragment_size; 610 - uint32_t gart_page_size; 609 + __u32 pte_fragment_size; 610 + __u32 gart_page_size; 611 611 /** constant engine ram size*/ 612 - uint32_t ce_ram_size; 612 + __u32 ce_ram_size; 613 613 /** video memory type info*/ 614 - uint32_t vram_type; 614 + __u32 vram_type; 615 615 /** video memory bit width*/ 616 - uint32_t vram_bit_width; 616 + __u32 vram_bit_width; 617 617 /* vce harvesting instance */ 618 - uint32_t vce_harvest_config; 618 + __u32 vce_harvest_config; 619 619 }; 620 620 621 621 struct drm_amdgpu_info_hw_ip { 622 622 /** Version of h/w IP */ 623 - uint32_t hw_ip_version_major; 624 - uint32_t hw_ip_version_minor; 623 + __u32 hw_ip_version_major; 624 + __u32 hw_ip_version_minor; 625 625 /** Capabilities */ 626 - uint64_t capabilities_flags; 626 + __u64 capabilities_flags; 627 627 /** command buffer address start alignment*/ 628 - uint32_t ib_start_alignment; 628 + __u32 ib_start_alignment; 629 629 /** command buffer size alignment*/ 630 - uint32_t ib_size_alignment; 630 + __u32 ib_size_alignment; 631 631 /** Bitmask of available rings. Bit 0 means ring 0, etc. */ 632 - uint32_t available_rings; 633 - uint32_t _pad; 632 + __u32 available_rings; 633 + __u32 _pad; 634 634 }; 635 635 636 636 /*
+2
include/uapi/drm/armada_drm.h
··· 9 9 #ifndef DRM_ARMADA_IOCTL_H 10 10 #define DRM_ARMADA_IOCTL_H 11 11 12 + #include "drm.h" 13 + 12 14 #define DRM_ARMADA_GEM_CREATE 0x00 13 15 #define DRM_ARMADA_GEM_MMAP 0x02 14 16 #define DRM_ARMADA_GEM_PWRITE 0x03
+5 -4
include/uapi/drm/drm.h
··· 54 54 typedef uint32_t __u32; 55 55 typedef int64_t __s64; 56 56 typedef uint64_t __u64; 57 + typedef size_t __kernel_size_t; 57 58 typedef unsigned long drm_handle_t; 58 59 59 60 #endif ··· 130 129 int version_major; /**< Major version */ 131 130 int version_minor; /**< Minor version */ 132 131 int version_patchlevel; /**< Patch level */ 133 - size_t name_len; /**< Length of name buffer */ 132 + __kernel_size_t name_len; /**< Length of name buffer */ 134 133 char __user *name; /**< Name of driver */ 135 - size_t date_len; /**< Length of date buffer */ 134 + __kernel_size_t date_len; /**< Length of date buffer */ 136 135 char __user *date; /**< User-space buffer to hold date */ 137 - size_t desc_len; /**< Length of desc buffer */ 136 + __kernel_size_t desc_len; /**< Length of desc buffer */ 138 137 char __user *desc; /**< User-space buffer to hold desc */ 139 138 }; 140 139 ··· 144 143 * \sa drmGetBusid() and drmSetBusId(). 145 144 */ 146 145 struct drm_unique { 147 - size_t unique_len; /**< Length of unique */ 146 + __kernel_size_t unique_len; /**< Length of unique */ 148 147 char __user *unique; /**< Unique name for driver instantiation */ 149 148 }; 150 149
+1 -1
include/uapi/drm/drm_fourcc.h
··· 24 24 #ifndef DRM_FOURCC_H 25 25 #define DRM_FOURCC_H 26 26 27 - #include <linux/types.h> 27 + #include "drm.h" 28 28 29 29 #define fourcc_code(a, b, c, d) ((__u32)(a) | ((__u32)(b) << 8) | \ 30 30 ((__u32)(c) << 16) | ((__u32)(d) << 24))
+9 -9
include/uapi/drm/drm_mode.h
··· 27 27 #ifndef _DRM_MODE_H 28 28 #define _DRM_MODE_H 29 29 30 - #include <linux/types.h> 30 + #include "drm.h" 31 31 32 32 #define DRM_DISPLAY_INFO_LEN 32 33 33 #define DRM_CONNECTOR_NAME_LEN 32 ··· 526 526 527 527 /* create a dumb scanout buffer */ 528 528 struct drm_mode_create_dumb { 529 - uint32_t height; 530 - uint32_t width; 531 - uint32_t bpp; 532 - uint32_t flags; 529 + __u32 height; 530 + __u32 width; 531 + __u32 bpp; 532 + __u32 flags; 533 533 /* handle, pitch, size will be returned */ 534 - uint32_t handle; 535 - uint32_t pitch; 536 - uint64_t size; 534 + __u32 handle; 535 + __u32 pitch; 536 + __u64 size; 537 537 }; 538 538 539 539 /* set up for mmap of a dumb scanout buffer */ ··· 550 550 }; 551 551 552 552 struct drm_mode_destroy_dumb { 553 - uint32_t handle; 553 + __u32 handle; 554 554 }; 555 555 556 556 /* page-flip flags are valid, plus: */
+1 -1
include/uapi/drm/drm_sarea.h
··· 32 32 #ifndef _DRM_SAREA_H_ 33 33 #define _DRM_SAREA_H_ 34 34 35 - #include <drm/drm.h> 35 + #include "drm.h" 36 36 37 37 /* SAREA area needs to be at least a page */ 38 38 #if defined(__alpha__)
+4 -4
include/uapi/drm/exynos_drm.h
··· 15 15 #ifndef _UAPI_EXYNOS_DRM_H_ 16 16 #define _UAPI_EXYNOS_DRM_H_ 17 17 18 - #include <drm/drm.h> 18 + #include "drm.h" 19 19 20 20 /** 21 21 * User-desired buffer creation information structure. ··· 27 27 * - this handle will be set by gem module of kernel side. 28 28 */ 29 29 struct drm_exynos_gem_create { 30 - uint64_t size; 30 + __u64 size; 31 31 unsigned int flags; 32 32 unsigned int handle; 33 33 }; ··· 44 44 struct drm_exynos_gem_info { 45 45 unsigned int handle; 46 46 unsigned int flags; 47 - uint64_t size; 47 + __u64 size; 48 48 }; 49 49 50 50 /** ··· 58 58 struct drm_exynos_vidi_connection { 59 59 unsigned int connection; 60 60 unsigned int extensions; 61 - uint64_t edid; 61 + __u64 edid; 62 62 }; 63 63 64 64 /* memory type definitions. */
+1 -1
include/uapi/drm/i810_drm.h
··· 1 1 #ifndef _I810_DRM_H_ 2 2 #define _I810_DRM_H_ 3 3 4 - #include <drm/drm.h> 4 + #include "drm.h" 5 5 6 6 /* WARNING: These defines must be the same as what the Xserver uses. 7 7 * if you change them, you must change the defines in the Xserver.
+1 -1
include/uapi/drm/i915_drm.h
··· 27 27 #ifndef _UAPI_I915_DRM_H_ 28 28 #define _UAPI_I915_DRM_H_ 29 29 30 - #include <drm/drm.h> 30 + #include "drm.h" 31 31 32 32 /* Please note that modifications to all structs defined here are 33 33 * subject to backwards-compatibility constraints.
+1 -1
include/uapi/drm/mga_drm.h
··· 35 35 #ifndef __MGA_DRM_H__ 36 36 #define __MGA_DRM_H__ 37 37 38 - #include <drm/drm.h> 38 + #include "drm.h" 39 39 40 40 /* WARNING: If you change any of these defines, make sure to change the 41 41 * defines in the Xserver file (mga_sarea.h)
+1 -2
include/uapi/drm/msm_drm.h
··· 18 18 #ifndef __MSM_DRM_H__ 19 19 #define __MSM_DRM_H__ 20 20 21 - #include <stddef.h> 22 - #include <drm/drm.h> 21 + #include "drm.h" 23 22 24 23 /* Please note that modifications to all structs defined here are 25 24 * subject to backwards-compatibility constraints:
+44 -42
include/uapi/drm/nouveau_drm.h
··· 27 27 28 28 #define DRM_NOUVEAU_EVENT_NVIF 0x80000000 29 29 30 + #include <drm/drm.h> 31 + 30 32 #define NOUVEAU_GEM_DOMAIN_CPU (1 << 0) 31 33 #define NOUVEAU_GEM_DOMAIN_VRAM (1 << 1) 32 34 #define NOUVEAU_GEM_DOMAIN_GART (1 << 2) ··· 43 41 #define NOUVEAU_GEM_TILE_NONCONTIG 0x00000008 44 42 45 43 struct drm_nouveau_gem_info { 46 - uint32_t handle; 47 - uint32_t domain; 48 - uint64_t size; 49 - uint64_t offset; 50 - uint64_t map_handle; 51 - uint32_t tile_mode; 52 - uint32_t tile_flags; 44 + __u32 handle; 45 + __u32 domain; 46 + __u64 size; 47 + __u64 offset; 48 + __u64 map_handle; 49 + __u32 tile_mode; 50 + __u32 tile_flags; 53 51 }; 54 52 55 53 struct drm_nouveau_gem_new { 56 54 struct drm_nouveau_gem_info info; 57 - uint32_t channel_hint; 58 - uint32_t align; 55 + __u32 channel_hint; 56 + __u32 align; 59 57 }; 60 58 61 59 #define NOUVEAU_GEM_MAX_BUFFERS 1024 62 60 struct drm_nouveau_gem_pushbuf_bo_presumed { 63 - uint32_t valid; 64 - uint32_t domain; 65 - uint64_t offset; 61 + __u32 valid; 62 + __u32 domain; 63 + __u64 offset; 66 64 }; 67 65 68 66 struct drm_nouveau_gem_pushbuf_bo { 69 - uint64_t user_priv; 70 - uint32_t handle; 71 - uint32_t read_domains; 72 - uint32_t write_domains; 73 - uint32_t valid_domains; 67 + __u64 user_priv; 68 + __u32 handle; 69 + __u32 read_domains; 70 + __u32 write_domains; 71 + __u32 valid_domains; 74 72 struct drm_nouveau_gem_pushbuf_bo_presumed presumed; 75 73 }; 76 74 ··· 79 77 #define NOUVEAU_GEM_RELOC_OR (1 << 2) 80 78 #define NOUVEAU_GEM_MAX_RELOCS 1024 81 79 struct drm_nouveau_gem_pushbuf_reloc { 82 - uint32_t reloc_bo_index; 83 - uint32_t reloc_bo_offset; 84 - uint32_t bo_index; 85 - uint32_t flags; 86 - uint32_t data; 87 - uint32_t vor; 88 - uint32_t tor; 80 + __u32 reloc_bo_index; 81 + __u32 reloc_bo_offset; 82 + __u32 bo_index; 83 + __u32 flags; 84 + __u32 data; 85 + __u32 vor; 86 + __u32 tor; 89 87 }; 90 88 91 89 #define NOUVEAU_GEM_MAX_PUSH 512 92 90 struct drm_nouveau_gem_pushbuf_push { 93 - uint32_t bo_index; 94 
- uint32_t pad; 95 - uint64_t offset; 96 - uint64_t length; 91 + __u32 bo_index; 92 + __u32 pad; 93 + __u64 offset; 94 + __u64 length; 97 95 }; 98 96 99 97 struct drm_nouveau_gem_pushbuf { 100 - uint32_t channel; 101 - uint32_t nr_buffers; 102 - uint64_t buffers; 103 - uint32_t nr_relocs; 104 - uint32_t nr_push; 105 - uint64_t relocs; 106 - uint64_t push; 107 - uint32_t suffix0; 108 - uint32_t suffix1; 109 - uint64_t vram_available; 110 - uint64_t gart_available; 98 + __u32 channel; 99 + __u32 nr_buffers; 100 + __u64 buffers; 101 + __u32 nr_relocs; 102 + __u32 nr_push; 103 + __u64 relocs; 104 + __u64 push; 105 + __u32 suffix0; 106 + __u32 suffix1; 107 + __u64 vram_available; 108 + __u64 gart_available; 111 109 }; 112 110 113 111 #define NOUVEAU_GEM_CPU_PREP_NOWAIT 0x00000001 114 112 #define NOUVEAU_GEM_CPU_PREP_WRITE 0x00000004 115 113 struct drm_nouveau_gem_cpu_prep { 116 - uint32_t handle; 117 - uint32_t flags; 114 + __u32 handle; 115 + __u32 flags; 118 116 }; 119 117 120 118 struct drm_nouveau_gem_cpu_fini { 121 - uint32_t handle; 119 + __u32 handle; 122 120 }; 123 121 124 122 #define DRM_NOUVEAU_GETPARAM 0x00 /* deprecated */
+1 -1
include/uapi/drm/omap_drm.h
··· 20 20 #ifndef __OMAP_DRM_H__ 21 21 #define __OMAP_DRM_H__ 22 22 23 - #include <drm/drm.h> 23 + #include "drm.h" 24 24 25 25 /* Please note that modifications to all structs defined here are 26 26 * subject to backwards-compatibility constraints.
+38 -39
include/uapi/drm/qxl_drm.h
··· 24 24 #ifndef QXL_DRM_H 25 25 #define QXL_DRM_H 26 26 27 - #include <stddef.h> 28 - #include "drm/drm.h" 27 + #include "drm.h" 29 28 30 29 /* Please note that modifications to all structs defined here are 31 30 * subject to backwards-compatibility constraints. 32 31 * 33 - * Do not use pointers, use uint64_t instead for 32 bit / 64 bit user/kernel 32 + * Do not use pointers, use __u64 instead for 32 bit / 64 bit user/kernel 34 33 * compatibility Keep fields aligned to their size 35 34 */ 36 35 ··· 47 48 #define DRM_QXL_ALLOC_SURF 0x06 48 49 49 50 struct drm_qxl_alloc { 50 - uint32_t size; 51 - uint32_t handle; /* 0 is an invalid handle */ 51 + __u32 size; 52 + __u32 handle; /* 0 is an invalid handle */ 52 53 }; 53 54 54 55 struct drm_qxl_map { 55 - uint64_t offset; /* use for mmap system call */ 56 - uint32_t handle; 57 - uint32_t pad; 56 + __u64 offset; /* use for mmap system call */ 57 + __u32 handle; 58 + __u32 pad; 58 59 }; 59 60 60 61 /* ··· 67 68 #define QXL_RELOC_TYPE_SURF 2 68 69 69 70 struct drm_qxl_reloc { 70 - uint64_t src_offset; /* offset into src_handle or src buffer */ 71 - uint64_t dst_offset; /* offset in dest handle */ 72 - uint32_t src_handle; /* dest handle to compute address from */ 73 - uint32_t dst_handle; /* 0 if to command buffer */ 74 - uint32_t reloc_type; 75 - uint32_t pad; 71 + __u64 src_offset; /* offset into src_handle or src buffer */ 72 + __u64 dst_offset; /* offset in dest handle */ 73 + __u32 src_handle; /* dest handle to compute address from */ 74 + __u32 dst_handle; /* 0 if to command buffer */ 75 + __u32 reloc_type; 76 + __u32 pad; 76 77 }; 77 78 78 79 struct drm_qxl_command { 79 - uint64_t __user command; /* void* */ 80 - uint64_t __user relocs; /* struct drm_qxl_reloc* */ 81 - uint32_t type; 82 - uint32_t command_size; 83 - uint32_t relocs_num; 84 - uint32_t pad; 80 + __u64 __user command; /* void* */ 81 + __u64 __user relocs; /* struct drm_qxl_reloc* */ 82 + __u32 type; 83 + __u32 command_size; 84 + __u32 relocs_num; 85 
+ __u32 pad; 85 86 }; 86 87 87 88 /* XXX: call it drm_qxl_commands? */ 88 89 struct drm_qxl_execbuffer { 89 - uint32_t flags; /* for future use */ 90 - uint32_t commands_num; 91 - uint64_t __user commands; /* struct drm_qxl_command* */ 90 + __u32 flags; /* for future use */ 91 + __u32 commands_num; 92 + __u64 __user commands; /* struct drm_qxl_command* */ 92 93 }; 93 94 94 95 struct drm_qxl_update_area { 95 - uint32_t handle; 96 - uint32_t top; 97 - uint32_t left; 98 - uint32_t bottom; 99 - uint32_t right; 100 - uint32_t pad; 96 + __u32 handle; 97 + __u32 top; 98 + __u32 left; 99 + __u32 bottom; 100 + __u32 right; 101 + __u32 pad; 101 102 }; 102 103 103 104 #define QXL_PARAM_NUM_SURFACES 1 /* rom->n_surfaces */ 104 105 #define QXL_PARAM_MAX_RELOCS 2 105 106 struct drm_qxl_getparam { 106 - uint64_t param; 107 - uint64_t value; 107 + __u64 param; 108 + __u64 value; 108 109 }; 109 110 110 111 /* these are one bit values */ 111 112 struct drm_qxl_clientcap { 112 - uint32_t index; 113 - uint32_t pad; 113 + __u32 index; 114 + __u32 pad; 114 115 }; 115 116 116 117 struct drm_qxl_alloc_surf { 117 - uint32_t format; 118 - uint32_t width; 119 - uint32_t height; 120 - int32_t stride; 121 - uint32_t handle; 122 - uint32_t pad; 118 + __u32 format; 119 + __u32 width; 120 + __u32 height; 121 + __s32 stride; 122 + __u32 handle; 123 + __u32 pad; 123 124 }; 124 125 125 126 #define DRM_IOCTL_QXL_ALLOC \
+1 -1
include/uapi/drm/r128_drm.h
··· 33 33 #ifndef __R128_DRM_H__ 34 34 #define __R128_DRM_H__ 35 35 36 - #include <drm/drm.h> 36 + #include "drm.h" 37 37 38 38 /* WARNING: If you change any of these defines, make sure to change the 39 39 * defines in the X server file (r128_sarea.h)
+64 -64
include/uapi/drm/radeon_drm.h
··· 793 793 #define RADEON_GEM_DOMAIN_VRAM 0x4 794 794 795 795 struct drm_radeon_gem_info { 796 - uint64_t gart_size; 797 - uint64_t vram_size; 798 - uint64_t vram_visible; 796 + __u64 gart_size; 797 + __u64 vram_size; 798 + __u64 vram_visible; 799 799 }; 800 800 801 801 #define RADEON_GEM_NO_BACKING_STORE (1 << 0) ··· 807 807 #define RADEON_GEM_NO_CPU_ACCESS (1 << 4) 808 808 809 809 struct drm_radeon_gem_create { 810 - uint64_t size; 811 - uint64_t alignment; 812 - uint32_t handle; 813 - uint32_t initial_domain; 814 - uint32_t flags; 810 + __u64 size; 811 + __u64 alignment; 812 + __u32 handle; 813 + __u32 initial_domain; 814 + __u32 flags; 815 815 }; 816 816 817 817 /* ··· 825 825 #define RADEON_GEM_USERPTR_REGISTER (1 << 3) 826 826 827 827 struct drm_radeon_gem_userptr { 828 - uint64_t addr; 829 - uint64_t size; 830 - uint32_t flags; 831 - uint32_t handle; 828 + __u64 addr; 829 + __u64 size; 830 + __u32 flags; 831 + __u32 handle; 832 832 }; 833 833 834 834 #define RADEON_TILING_MACRO 0x1 ··· 850 850 #define RADEON_TILING_EG_STENCIL_TILE_SPLIT_MASK 0xf 851 851 852 852 struct drm_radeon_gem_set_tiling { 853 - uint32_t handle; 854 - uint32_t tiling_flags; 855 - uint32_t pitch; 853 + __u32 handle; 854 + __u32 tiling_flags; 855 + __u32 pitch; 856 856 }; 857 857 858 858 struct drm_radeon_gem_get_tiling { 859 - uint32_t handle; 860 - uint32_t tiling_flags; 861 - uint32_t pitch; 859 + __u32 handle; 860 + __u32 tiling_flags; 861 + __u32 pitch; 862 862 }; 863 863 864 864 struct drm_radeon_gem_mmap { 865 - uint32_t handle; 866 - uint32_t pad; 867 - uint64_t offset; 868 - uint64_t size; 869 - uint64_t addr_ptr; 865 + __u32 handle; 866 + __u32 pad; 867 + __u64 offset; 868 + __u64 size; 869 + __u64 addr_ptr; 870 870 }; 871 871 872 872 struct drm_radeon_gem_set_domain { 873 - uint32_t handle; 874 - uint32_t read_domains; 875 - uint32_t write_domain; 873 + __u32 handle; 874 + __u32 read_domains; 875 + __u32 write_domain; 876 876 }; 877 877 878 878 struct drm_radeon_gem_wait_idle 
{ 879 - uint32_t handle; 880 - uint32_t pad; 879 + __u32 handle; 880 + __u32 pad; 881 881 }; 882 882 883 883 struct drm_radeon_gem_busy { 884 - uint32_t handle; 885 - uint32_t domain; 884 + __u32 handle; 885 + __u32 domain; 886 886 }; 887 887 888 888 struct drm_radeon_gem_pread { 889 889 /** Handle for the object being read. */ 890 - uint32_t handle; 891 - uint32_t pad; 890 + __u32 handle; 891 + __u32 pad; 892 892 /** Offset into the object to read from */ 893 - uint64_t offset; 893 + __u64 offset; 894 894 /** Length of data to read */ 895 - uint64_t size; 895 + __u64 size; 896 896 /** Pointer to write the data into. */ 897 897 /* void *, but pointers are not 32/64 compatible */ 898 - uint64_t data_ptr; 898 + __u64 data_ptr; 899 899 }; 900 900 901 901 struct drm_radeon_gem_pwrite { 902 902 /** Handle for the object being written to. */ 903 - uint32_t handle; 904 - uint32_t pad; 903 + __u32 handle; 904 + __u32 pad; 905 905 /** Offset into the object to write to */ 906 - uint64_t offset; 906 + __u64 offset; 907 907 /** Length of data to write */ 908 - uint64_t size; 908 + __u64 size; 909 909 /** Pointer to read the data from. */ 910 910 /* void *, but pointers are not 32/64 compatible */ 911 - uint64_t data_ptr; 911 + __u64 data_ptr; 912 912 }; 913 913 914 914 /* Sets or returns a value associated with a buffer. 
*/ 915 915 struct drm_radeon_gem_op { 916 - uint32_t handle; /* buffer */ 917 - uint32_t op; /* RADEON_GEM_OP_* */ 918 - uint64_t value; /* input or return value */ 916 + __u32 handle; /* buffer */ 917 + __u32 op; /* RADEON_GEM_OP_* */ 918 + __u64 value; /* input or return value */ 919 919 }; 920 920 921 921 #define RADEON_GEM_OP_GET_INITIAL_DOMAIN 0 ··· 935 935 #define RADEON_VM_PAGE_SNOOPED (1 << 4) 936 936 937 937 struct drm_radeon_gem_va { 938 - uint32_t handle; 939 - uint32_t operation; 940 - uint32_t vm_id; 941 - uint32_t flags; 942 - uint64_t offset; 938 + __u32 handle; 939 + __u32 operation; 940 + __u32 vm_id; 941 + __u32 flags; 942 + __u64 offset; 943 943 }; 944 944 945 945 #define RADEON_CHUNK_ID_RELOCS 0x01 ··· 961 961 /* 0 = normal, + = higher priority, - = lower priority */ 962 962 963 963 struct drm_radeon_cs_chunk { 964 - uint32_t chunk_id; 965 - uint32_t length_dw; 966 - uint64_t chunk_data; 964 + __u32 chunk_id; 965 + __u32 length_dw; 966 + __u64 chunk_data; 967 967 }; 968 968 969 969 /* drm_radeon_cs_reloc.flags */ 970 970 #define RADEON_RELOC_PRIO_MASK (0xf << 0) 971 971 972 972 struct drm_radeon_cs_reloc { 973 - uint32_t handle; 974 - uint32_t read_domains; 975 - uint32_t write_domain; 976 - uint32_t flags; 973 + __u32 handle; 974 + __u32 read_domains; 975 + __u32 write_domain; 976 + __u32 flags; 977 977 }; 978 978 979 979 struct drm_radeon_cs { 980 - uint32_t num_chunks; 981 - uint32_t cs_id; 982 - /* this points to uint64_t * which point to cs chunks */ 983 - uint64_t chunks; 980 + __u32 num_chunks; 981 + __u32 cs_id; 982 + /* this points to __u64 * which point to cs chunks */ 983 + __u64 chunks; 984 984 /* updates to the limits after this CS ioctl */ 985 - uint64_t gart_limit; 986 - uint64_t vram_limit; 985 + __u64 gart_limit; 986 + __u64 vram_limit; 987 987 }; 988 988 989 989 #define RADEON_INFO_DEVICE_ID 0x00 ··· 1042 1042 #define RADEON_INFO_GPU_RESET_COUNTER 0x26 1043 1043 1044 1044 struct drm_radeon_info { 1045 - uint32_t request; 1046 - 
uint32_t pad; 1047 - uint64_t value; 1045 + __u32 request; 1046 + __u32 pad; 1047 + __u64 value; 1048 1048 }; 1049 1049 1050 1050 /* Those correspond to the tile index to use, this is to explicitly state
+1 -1
include/uapi/drm/savage_drm.h
··· 26 26 #ifndef __SAVAGE_DRM_H__ 27 27 #define __SAVAGE_DRM_H__ 28 28 29 - #include <drm/drm.h> 29 + #include "drm.h" 30 30 31 31 #ifndef __SAVAGE_SAREA_DEFINES__ 32 32 #define __SAVAGE_SAREA_DEFINES__
+1 -1
include/uapi/drm/tegra_drm.h
··· 23 23 #ifndef _UAPI_TEGRA_DRM_H_ 24 24 #define _UAPI_TEGRA_DRM_H_ 25 25 26 - #include <drm/drm.h> 26 + #include "drm.h" 27 27 28 28 #define DRM_TEGRA_GEM_CREATE_TILED (1 << 0) 29 29 #define DRM_TEGRA_GEM_CREATE_BOTTOM_UP (1 << 1)
+1 -4
include/uapi/drm/via_drm.h
··· 24 24 #ifndef _VIA_DRM_H_ 25 25 #define _VIA_DRM_H_ 26 26 27 - #include <drm/drm.h> 27 + #include "drm.h" 28 28 29 29 /* WARNING: These defines must be the same as what the Xserver uses. 30 30 * if you change them, you must change the defines in the Xserver. ··· 33 33 #ifndef _VIA_DEFINES_ 34 34 #define _VIA_DEFINES_ 35 35 36 - #ifndef __KERNEL__ 37 - #include "via_drmclient.h" 38 - #endif 39 36 40 37 #define VIA_NR_SAREA_CLIPRECTS 8 41 38 #define VIA_NR_XVMC_PORTS 10
+50 -51
include/uapi/drm/virtgpu_drm.h
··· 24 24 #ifndef VIRTGPU_DRM_H 25 25 #define VIRTGPU_DRM_H 26 26 27 - #include <stddef.h> 28 - #include "drm/drm.h" 27 + #include "drm.h" 29 28 30 29 /* Please note that modifications to all structs defined here are 31 30 * subject to backwards-compatibility constraints. 32 31 * 33 - * Do not use pointers, use uint64_t instead for 32 bit / 64 bit user/kernel 32 + * Do not use pointers, use __u64 instead for 32 bit / 64 bit user/kernel 34 33 * compatibility Keep fields aligned to their size 35 34 */ 36 35 ··· 44 45 #define DRM_VIRTGPU_GET_CAPS 0x09 45 46 46 47 struct drm_virtgpu_map { 47 - uint64_t offset; /* use for mmap system call */ 48 - uint32_t handle; 49 - uint32_t pad; 48 + __u64 offset; /* use for mmap system call */ 49 + __u32 handle; 50 + __u32 pad; 50 51 }; 51 52 52 53 struct drm_virtgpu_execbuffer { 53 - uint32_t flags; /* for future use */ 54 - uint32_t size; 55 - uint64_t command; /* void* */ 56 - uint64_t bo_handles; 57 - uint32_t num_bo_handles; 58 - uint32_t pad; 54 + __u32 flags; /* for future use */ 55 + __u32 size; 56 + __u64 command; /* void* */ 57 + __u64 bo_handles; 58 + __u32 num_bo_handles; 59 + __u32 pad; 59 60 }; 60 61 61 62 #define VIRTGPU_PARAM_3D_FEATURES 1 /* do we have 3D features in the hw */ 62 63 63 64 struct drm_virtgpu_getparam { 64 - uint64_t param; 65 - uint64_t value; 65 + __u64 param; 66 + __u64 value; 66 67 }; 67 68 68 69 /* NO_BO flags? NO resource flag? */ 69 70 /* resource flag for y_0_top */ 70 71 struct drm_virtgpu_resource_create { 71 - uint32_t target; 72 - uint32_t format; 73 - uint32_t bind; 74 - uint32_t width; 75 - uint32_t height; 76 - uint32_t depth; 77 - uint32_t array_size; 78 - uint32_t last_level; 79 - uint32_t nr_samples; 80 - uint32_t flags; 81 - uint32_t bo_handle; /* if this is set - recreate a new resource attached to this bo ? 
*/ 82 - uint32_t res_handle; /* returned by kernel */ 83 - uint32_t size; /* validate transfer in the host */ 84 - uint32_t stride; /* validate transfer in the host */ 72 + __u32 target; 73 + __u32 format; 74 + __u32 bind; 75 + __u32 width; 76 + __u32 height; 77 + __u32 depth; 78 + __u32 array_size; 79 + __u32 last_level; 80 + __u32 nr_samples; 81 + __u32 flags; 82 + __u32 bo_handle; /* if this is set - recreate a new resource attached to this bo ? */ 83 + __u32 res_handle; /* returned by kernel */ 84 + __u32 size; /* validate transfer in the host */ 85 + __u32 stride; /* validate transfer in the host */ 85 86 }; 86 87 87 88 struct drm_virtgpu_resource_info { 88 - uint32_t bo_handle; 89 - uint32_t res_handle; 90 - uint32_t size; 91 - uint32_t stride; 89 + __u32 bo_handle; 90 + __u32 res_handle; 91 + __u32 size; 92 + __u32 stride; 92 93 }; 93 94 94 95 struct drm_virtgpu_3d_box { 95 - uint32_t x; 96 - uint32_t y; 97 - uint32_t z; 98 - uint32_t w; 99 - uint32_t h; 100 - uint32_t d; 96 + __u32 x; 97 + __u32 y; 98 + __u32 z; 99 + __u32 w; 100 + __u32 h; 101 + __u32 d; 101 102 }; 102 103 103 104 struct drm_virtgpu_3d_transfer_to_host { 104 - uint32_t bo_handle; 105 + __u32 bo_handle; 105 106 struct drm_virtgpu_3d_box box; 106 - uint32_t level; 107 - uint32_t offset; 107 + __u32 level; 108 + __u32 offset; 108 109 }; 109 110 110 111 struct drm_virtgpu_3d_transfer_from_host { 111 - uint32_t bo_handle; 112 + __u32 bo_handle; 112 113 struct drm_virtgpu_3d_box box; 113 - uint32_t level; 114 - uint32_t offset; 114 + __u32 level; 115 + __u32 offset; 115 116 }; 116 117 117 118 #define VIRTGPU_WAIT_NOWAIT 1 /* like it */ 118 119 struct drm_virtgpu_3d_wait { 119 - uint32_t handle; /* 0 is an invalid handle */ 120 - uint32_t flags; 120 + __u32 handle; /* 0 is an invalid handle */ 121 + __u32 flags; 121 122 }; 122 123 123 124 struct drm_virtgpu_get_caps { 124 - uint32_t cap_set_id; 125 - uint32_t cap_set_ver; 126 - uint64_t addr; 127 - uint32_t size; 128 - uint32_t pad; 125 + __u32 
cap_set_id; 126 + __u32 cap_set_ver; 127 + __u64 addr; 128 + __u32 size; 129 + __u32 pad; 129 130 }; 130 131 131 132 #define DRM_IOCTL_VIRTGPU_MAP \
+133 -135
include/uapi/drm/vmwgfx_drm.h
··· 28 28 #ifndef __VMWGFX_DRM_H__ 29 29 #define __VMWGFX_DRM_H__ 30 30 31 - #ifndef __KERNEL__ 32 - #include <drm/drm.h> 33 - #endif 31 + #include "drm.h" 34 32 35 33 #define DRM_VMW_MAX_SURFACE_FACES 6 36 34 #define DRM_VMW_MAX_MIP_LEVELS 24 ··· 109 111 */ 110 112 111 113 struct drm_vmw_getparam_arg { 112 - uint64_t value; 113 - uint32_t param; 114 - uint32_t pad64; 114 + __u64 value; 115 + __u32 param; 116 + __u32 pad64; 115 117 }; 116 118 117 119 /*************************************************************************/ ··· 132 134 */ 133 135 134 136 struct drm_vmw_context_arg { 135 - int32_t cid; 136 - uint32_t pad64; 137 + __s32 cid; 138 + __u32 pad64; 137 139 }; 138 140 139 141 /*************************************************************************/ ··· 163 165 * @mip_levels: Number of mip levels for each face. 164 166 * An unused face should have 0 encoded. 165 167 * @size_addr: Address of a user-space array of sruct drm_vmw_size 166 - * cast to an uint64_t for 32-64 bit compatibility. 168 + * cast to an __u64 for 32-64 bit compatibility. 167 169 * The size of the array should equal the total number of mipmap levels. 168 170 * @shareable: Boolean whether other clients (as identified by file descriptors) 169 171 * may reference this surface. 
··· 175 177 */ 176 178 177 179 struct drm_vmw_surface_create_req { 178 - uint32_t flags; 179 - uint32_t format; 180 - uint32_t mip_levels[DRM_VMW_MAX_SURFACE_FACES]; 181 - uint64_t size_addr; 182 - int32_t shareable; 183 - int32_t scanout; 180 + __u32 flags; 181 + __u32 format; 182 + __u32 mip_levels[DRM_VMW_MAX_SURFACE_FACES]; 183 + __u64 size_addr; 184 + __s32 shareable; 185 + __s32 scanout; 184 186 }; 185 187 186 188 /** ··· 195 197 */ 196 198 197 199 struct drm_vmw_surface_arg { 198 - int32_t sid; 200 + __s32 sid; 199 201 enum drm_vmw_handle_type handle_type; 200 202 }; 201 203 ··· 211 213 */ 212 214 213 215 struct drm_vmw_size { 214 - uint32_t width; 215 - uint32_t height; 216 - uint32_t depth; 217 - uint32_t pad64; 216 + __u32 width; 217 + __u32 height; 218 + __u32 depth; 219 + __u32 pad64; 218 220 }; 219 221 220 222 /** ··· 282 284 /** 283 285 * struct drm_vmw_execbuf_arg 284 286 * 285 - * @commands: User-space address of a command buffer cast to an uint64_t. 287 + * @commands: User-space address of a command buffer cast to an __u64. 286 288 * @command-size: Size in bytes of the command buffer. 287 289 * @throttle-us: Sleep until software is less than @throttle_us 288 290 * microseconds ahead of hardware. The driver may round this value 289 291 * to the nearest kernel tick. 290 292 * @fence_rep: User-space address of a struct drm_vmw_fence_rep cast to an 291 - * uint64_t. 293 + * __u64. 292 294 * @version: Allows expanding the execbuf ioctl parameters without breaking 293 295 * backwards compatibility, since user-space will always tell the kernel 294 296 * which version it uses. 
··· 300 302 #define DRM_VMW_EXECBUF_VERSION 2 301 303 302 304 struct drm_vmw_execbuf_arg { 303 - uint64_t commands; 304 - uint32_t command_size; 305 - uint32_t throttle_us; 306 - uint64_t fence_rep; 307 - uint32_t version; 308 - uint32_t flags; 309 - uint32_t context_handle; 310 - uint32_t pad64; 305 + __u64 commands; 306 + __u32 command_size; 307 + __u32 throttle_us; 308 + __u64 fence_rep; 309 + __u32 version; 310 + __u32 flags; 311 + __u32 context_handle; 312 + __u32 pad64; 311 313 }; 312 314 313 315 /** ··· 336 338 */ 337 339 338 340 struct drm_vmw_fence_rep { 339 - uint32_t handle; 340 - uint32_t mask; 341 - uint32_t seqno; 342 - uint32_t passed_seqno; 343 - uint32_t pad64; 344 - int32_t error; 341 + __u32 handle; 342 + __u32 mask; 343 + __u32 seqno; 344 + __u32 passed_seqno; 345 + __u32 pad64; 346 + __s32 error; 345 347 }; 346 348 347 349 /*************************************************************************/ ··· 371 373 */ 372 374 373 375 struct drm_vmw_alloc_dmabuf_req { 374 - uint32_t size; 375 - uint32_t pad64; 376 + __u32 size; 377 + __u32 pad64; 376 378 }; 377 379 378 380 /** ··· 389 391 */ 390 392 391 393 struct drm_vmw_dmabuf_rep { 392 - uint64_t map_handle; 393 - uint32_t handle; 394 - uint32_t cur_gmr_id; 395 - uint32_t cur_gmr_offset; 396 - uint32_t pad64; 394 + __u64 map_handle; 395 + __u32 handle; 396 + __u32 cur_gmr_id; 397 + __u32 cur_gmr_offset; 398 + __u32 pad64; 397 399 }; 398 400 399 401 /** ··· 426 428 */ 427 429 428 430 struct drm_vmw_unref_dmabuf_arg { 429 - uint32_t handle; 430 - uint32_t pad64; 431 + __u32 handle; 432 + __u32 pad64; 431 433 }; 432 434 433 435 /*************************************************************************/ ··· 450 452 */ 451 453 452 454 struct drm_vmw_rect { 453 - int32_t x; 454 - int32_t y; 455 - uint32_t w; 456 - uint32_t h; 455 + __s32 x; 456 + __s32 y; 457 + __u32 w; 458 + __u32 h; 457 459 }; 458 460 459 461 /** ··· 475 477 */ 476 478 477 479 struct drm_vmw_control_stream_arg { 478 - uint32_t 
stream_id; 479 - uint32_t enabled; 480 + __u32 stream_id; 481 + __u32 enabled; 480 482 481 - uint32_t flags; 482 - uint32_t color_key; 483 + __u32 flags; 484 + __u32 color_key; 483 485 484 - uint32_t handle; 485 - uint32_t offset; 486 - int32_t format; 487 - uint32_t size; 488 - uint32_t width; 489 - uint32_t height; 490 - uint32_t pitch[3]; 486 + __u32 handle; 487 + __u32 offset; 488 + __s32 format; 489 + __u32 size; 490 + __u32 width; 491 + __u32 height; 492 + __u32 pitch[3]; 491 493 492 - uint32_t pad64; 494 + __u32 pad64; 493 495 struct drm_vmw_rect src; 494 496 struct drm_vmw_rect dst; 495 497 }; ··· 517 519 */ 518 520 519 521 struct drm_vmw_cursor_bypass_arg { 520 - uint32_t flags; 521 - uint32_t crtc_id; 522 - int32_t xpos; 523 - int32_t ypos; 524 - int32_t xhot; 525 - int32_t yhot; 522 + __u32 flags; 523 + __u32 crtc_id; 524 + __s32 xpos; 525 + __s32 ypos; 526 + __s32 xhot; 527 + __s32 yhot; 526 528 }; 527 529 528 530 /*************************************************************************/ ··· 540 542 */ 541 543 542 544 struct drm_vmw_stream_arg { 543 - uint32_t stream_id; 544 - uint32_t pad64; 545 + __u32 stream_id; 546 + __u32 pad64; 545 547 }; 546 548 547 549 /*************************************************************************/ ··· 563 565 /** 564 566 * struct drm_vmw_get_3d_cap_arg 565 567 * 566 - * @buffer: Pointer to a buffer for capability data, cast to an uint64_t 568 + * @buffer: Pointer to a buffer for capability data, cast to an __u64 567 569 * @size: Max size to copy 568 570 * 569 571 * Input argument to the DRM_VMW_GET_3D_CAP_IOCTL ··· 571 573 */ 572 574 573 575 struct drm_vmw_get_3d_cap_arg { 574 - uint64_t buffer; 575 - uint32_t max_size; 576 - uint32_t pad64; 576 + __u64 buffer; 577 + __u32 max_size; 578 + __u32 pad64; 577 579 }; 578 580 579 581 /*************************************************************************/ ··· 622 624 */ 623 625 624 626 struct drm_vmw_fence_wait_arg { 625 - uint32_t handle; 626 - int32_t cookie_valid; 
627 - uint64_t kernel_cookie; 628 - uint64_t timeout_us; 629 - int32_t lazy; 630 - int32_t flags; 631 - int32_t wait_options; 632 - int32_t pad64; 627 + __u32 handle; 628 + __s32 cookie_valid; 629 + __u64 kernel_cookie; 630 + __u64 timeout_us; 631 + __s32 lazy; 632 + __s32 flags; 633 + __s32 wait_options; 634 + __s32 pad64; 633 635 }; 634 636 635 637 /*************************************************************************/ ··· 653 655 */ 654 656 655 657 struct drm_vmw_fence_signaled_arg { 656 - uint32_t handle; 657 - uint32_t flags; 658 - int32_t signaled; 659 - uint32_t passed_seqno; 660 - uint32_t signaled_flags; 661 - uint32_t pad64; 658 + __u32 handle; 659 + __u32 flags; 660 + __s32 signaled; 661 + __u32 passed_seqno; 662 + __u32 signaled_flags; 663 + __u32 pad64; 662 664 }; 663 665 664 666 /*************************************************************************/ ··· 679 681 */ 680 682 681 683 struct drm_vmw_fence_arg { 682 - uint32_t handle; 683 - uint32_t pad64; 684 + __u32 handle; 685 + __u32 pad64; 684 686 }; 685 687 686 688 ··· 701 703 702 704 struct drm_vmw_event_fence { 703 705 struct drm_event base; 704 - uint64_t user_data; 705 - uint32_t tv_sec; 706 - uint32_t tv_usec; 706 + __u64 user_data; 707 + __u32 tv_sec; 708 + __u32 tv_usec; 707 709 }; 708 710 709 711 /* ··· 715 717 /** 716 718 * struct drm_vmw_fence_event_arg 717 719 * 718 - * @fence_rep: Pointer to fence_rep structure cast to uint64_t or 0 if 720 + * @fence_rep: Pointer to fence_rep structure cast to __u64 or 0 if 719 721 * the fence is not supposed to be referenced by user-space. 720 722 * @user_info: Info to be delivered with the event. 721 723 * @handle: Attach the event to this fence only. 722 724 * @flags: A set of flags as defined above. 
723 725 */ 724 726 struct drm_vmw_fence_event_arg { 725 - uint64_t fence_rep; 726 - uint64_t user_data; 727 - uint32_t handle; 728 - uint32_t flags; 727 + __u64 fence_rep; 728 + __u64 user_data; 729 + __u32 handle; 730 + __u32 flags; 729 731 }; 730 732 731 733 ··· 745 747 * @sid: Surface id to present from. 746 748 * @dest_x: X placement coordinate for surface. 747 749 * @dest_y: Y placement coordinate for surface. 748 - * @clips_ptr: Pointer to an array of clip rects cast to an uint64_t. 750 + * @clips_ptr: Pointer to an array of clip rects cast to an __u64. 749 751 * @num_clips: Number of cliprects given relative to the framebuffer origin, 750 752 * in the same coordinate space as the frame buffer. 751 753 * @pad64: Unused 64-bit padding. ··· 754 756 */ 755 757 756 758 struct drm_vmw_present_arg { 757 - uint32_t fb_id; 758 - uint32_t sid; 759 - int32_t dest_x; 760 - int32_t dest_y; 761 - uint64_t clips_ptr; 762 - uint32_t num_clips; 763 - uint32_t pad64; 759 + __u32 fb_id; 760 + __u32 sid; 761 + __s32 dest_x; 762 + __s32 dest_y; 763 + __u64 clips_ptr; 764 + __u32 num_clips; 765 + __u32 pad64; 764 766 }; 765 767 766 768 ··· 778 780 * struct drm_vmw_present_arg 779 781 * @fb_id: fb_id to present / read back from. 780 782 * @num_clips: Number of cliprects. 781 - * @clips_ptr: Pointer to an array of clip rects cast to an uint64_t. 782 - * @fence_rep: Pointer to a struct drm_vmw_fence_rep, cast to an uint64_t. 783 + * @clips_ptr: Pointer to an array of clip rects cast to an __u64. 784 + * @fence_rep: Pointer to a struct drm_vmw_fence_rep, cast to an __u64. 783 785 * If this member is NULL, then the ioctl should not return a fence. 
784 786 */ 785 787 786 788 struct drm_vmw_present_readback_arg { 787 - uint32_t fb_id; 788 - uint32_t num_clips; 789 - uint64_t clips_ptr; 790 - uint64_t fence_rep; 789 + __u32 fb_id; 790 + __u32 num_clips; 791 + __u64 clips_ptr; 792 + __u64 fence_rep; 791 793 }; 792 794 793 795 /*************************************************************************/ ··· 803 805 * struct drm_vmw_update_layout_arg 804 806 * 805 807 * @num_outputs: number of active connectors 806 - * @rects: pointer to array of drm_vmw_rect cast to an uint64_t 808 + * @rects: pointer to array of drm_vmw_rect cast to an __u64 807 809 * 808 810 * Input argument to the DRM_VMW_UPDATE_LAYOUT Ioctl. 809 811 */ 810 812 struct drm_vmw_update_layout_arg { 811 - uint32_t num_outputs; 812 - uint32_t pad64; 813 - uint64_t rects; 813 + __u32 num_outputs; 814 + __u32 pad64; 815 + __u64 rects; 814 816 }; 815 817 816 818 ··· 847 849 */ 848 850 struct drm_vmw_shader_create_arg { 849 851 enum drm_vmw_shader_type shader_type; 850 - uint32_t size; 851 - uint32_t buffer_handle; 852 - uint32_t shader_handle; 853 - uint64_t offset; 852 + __u32 size; 853 + __u32 buffer_handle; 854 + __u32 shader_handle; 855 + __u64 offset; 854 856 }; 855 857 856 858 /*************************************************************************/ ··· 869 871 * Input argument to the DRM_VMW_UNREF_SHADER ioctl. 870 872 */ 871 873 struct drm_vmw_shader_arg { 872 - uint32_t handle; 873 - uint32_t pad64; 874 + __u32 handle; 875 + __u32 pad64; 874 876 }; 875 877 876 878 /*************************************************************************/ ··· 916 918 * Part of output argument for the DRM_VMW_GB_SURFACE_REF Ioctl. 
917 919 */ 918 920 struct drm_vmw_gb_surface_create_req { 919 - uint32_t svga3d_flags; 920 - uint32_t format; 921 - uint32_t mip_levels; 921 + __u32 svga3d_flags; 922 + __u32 format; 923 + __u32 mip_levels; 922 924 enum drm_vmw_surface_flags drm_surface_flags; 923 - uint32_t multisample_count; 924 - uint32_t autogen_filter; 925 - uint32_t buffer_handle; 926 - uint32_t array_size; 925 + __u32 multisample_count; 926 + __u32 autogen_filter; 927 + __u32 buffer_handle; 928 + __u32 array_size; 927 929 struct drm_vmw_size base_size; 928 930 }; 929 931 ··· 942 944 * Output argument for the DRM_VMW_GB_SURFACE_CREATE ioctl. 943 945 */ 944 946 struct drm_vmw_gb_surface_create_rep { 945 - uint32_t handle; 946 - uint32_t backup_size; 947 - uint32_t buffer_handle; 948 - uint32_t buffer_size; 949 - uint64_t buffer_map_handle; 947 + __u32 handle; 948 + __u32 backup_size; 949 + __u32 buffer_handle; 950 + __u32 buffer_size; 951 + __u64 buffer_map_handle; 950 952 }; 951 953 952 954 /** ··· 1059 1061 struct drm_vmw_synccpu_arg { 1060 1062 enum drm_vmw_synccpu_op op; 1061 1063 enum drm_vmw_synccpu_flags flags; 1062 - uint32_t handle; 1063 - uint32_t pad64; 1064 + __u32 handle; 1065 + __u32 pad64; 1064 1066 }; 1065 1067 1066 1068 /*************************************************************************/
+1
include/uapi/linux/agpgart.h
··· 52 52 53 53 #ifndef __KERNEL__ 54 54 #include <linux/types.h> 55 + #include <stdlib.h> 55 56 56 57 struct agp_version { 57 58 __u16 major;
+1 -1
include/uapi/linux/virtio_gpu.h
··· 287 287 /* VIRTIO_GPU_RESP_OK_CAPSET */ 288 288 struct virtio_gpu_resp_capset { 289 289 struct virtio_gpu_ctrl_hdr hdr; 290 - uint8_t capset_data[]; 290 + __u8 capset_data[]; 291 291 }; 292 292 293 293 #define VIRTIO_GPU_EVENT_DISPLAY (1 << 0)