Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

drm: omapdrm: Use kernel integer types

The standard kernel integer types are [us]{8,16,32}. Use them instead of
the u?int{8,16,32}_t types.

Signed-off-by: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
Reviewed-by: Sebastian Reichel <sebastian.reichel@collabora.co.uk>
Signed-off-by: Tomi Valkeinen <tomi.valkeinen@ti.com>

Authored by Laurent Pinchart and committed by Tomi Valkeinen.
dfe9cfcc f073d78e

+101 -98
+6 -6
drivers/gpu/drm/omapdrm/omap_crtc.c
··· 272 272 * Setup, Flush and Page Flip 273 273 */ 274 274 275 - void omap_crtc_error_irq(struct drm_crtc *crtc, uint32_t irqstatus) 275 + void omap_crtc_error_irq(struct drm_crtc *crtc, u32 irqstatus) 276 276 { 277 277 struct omap_crtc *omap_crtc = to_omap_crtc(crtc); 278 278 ··· 492 492 struct drm_plane_state *pri_state; 493 493 494 494 if (state->color_mgmt_changed && state->gamma_lut) { 495 - uint length = state->gamma_lut->length / 495 + unsigned int length = state->gamma_lut->length / 496 496 sizeof(struct drm_color_lut); 497 497 498 498 if (length < 2) ··· 526 526 527 527 if (crtc->state->color_mgmt_changed) { 528 528 struct drm_color_lut *lut = NULL; 529 - uint length = 0; 529 + unsigned int length = 0; 530 530 531 531 if (crtc->state->gamma_lut) { 532 532 lut = (struct drm_color_lut *) ··· 557 557 static int omap_crtc_atomic_set_property(struct drm_crtc *crtc, 558 558 struct drm_crtc_state *state, 559 559 struct drm_property *property, 560 - uint64_t val) 560 + u64 val) 561 561 { 562 562 struct omap_drm_private *priv = crtc->dev->dev_private; 563 563 struct drm_plane_state *plane_state; ··· 585 585 static int omap_crtc_atomic_get_property(struct drm_crtc *crtc, 586 586 const struct drm_crtc_state *state, 587 587 struct drm_property *property, 588 - uint64_t *val) 588 + u64 *val) 589 589 { 590 590 struct omap_drm_private *priv = crtc->dev->dev_private; 591 591 struct omap_crtc_state *omap_state = to_omap_crtc_state(state); ··· 732 732 * gamma table is not supprted. 733 733 */ 734 734 if (priv->dispc_ops->mgr_gamma_size(channel)) { 735 - uint gamma_lut_size = 256; 735 + unsigned int gamma_lut_size = 256; 736 736 737 737 drm_crtc_enable_color_mgmt(crtc, 0, false, gamma_lut_size); 738 738 drm_mode_crtc_set_gamma_size(crtc, gamma_lut_size);
+1 -1
drivers/gpu/drm/omapdrm/omap_crtc.h
··· 37 37 struct drm_crtc *omap_crtc_init(struct drm_device *dev, 38 38 struct drm_plane *plane, struct omap_dss_device *dssdev); 39 39 int omap_crtc_wait_pending(struct drm_crtc *crtc); 40 - void omap_crtc_error_irq(struct drm_crtc *crtc, uint32_t irqstatus); 40 + void omap_crtc_error_irq(struct drm_crtc *crtc, u32 irqstatus); 41 41 void omap_crtc_vblank_irq(struct drm_crtc *crtc); 42 42 43 43 #endif /* __OMAPDRM_CRTC_H__ */
+5 -5
drivers/gpu/drm/omapdrm/omap_dmm_priv.h
··· 102 102 }; 103 103 104 104 struct pat { 105 - uint32_t next_pa; 105 + u32 next_pa; 106 106 struct pat_area area; 107 107 struct pat_ctrl ctrl; 108 - uint32_t data_pa; 108 + u32 data_pa; 109 109 }; 110 110 111 111 #define DMM_FIXED_RETRY_COUNT 1000 ··· 129 129 void *engine_handle; 130 130 struct tcm *tcm; 131 131 132 - uint8_t *current_va; 132 + u8 *current_va; 133 133 dma_addr_t current_pa; 134 134 135 135 struct pat *last_pat; ··· 140 140 struct dmm *dmm; 141 141 struct tcm *tcm; 142 142 143 - uint8_t *refill_va; 143 + u8 *refill_va; 144 144 dma_addr_t refill_pa; 145 145 146 146 /* only one trans per engine for now */ ··· 154 154 }; 155 155 156 156 struct dmm_platform_data { 157 - uint32_t cpu_cache_flags; 157 + u32 cpu_cache_flags; 158 158 }; 159 159 160 160 struct dmm {
+23 -23
drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
··· 58 58 } 59 59 60 60 static const struct { 61 - uint32_t x_shft; /* unused X-bits (as part of bpp) */ 62 - uint32_t y_shft; /* unused Y-bits (as part of bpp) */ 63 - uint32_t cpp; /* bytes/chars per pixel */ 64 - uint32_t slot_w; /* width of each slot (in pixels) */ 65 - uint32_t slot_h; /* height of each slot (in pixels) */ 61 + u32 x_shft; /* unused X-bits (as part of bpp) */ 62 + u32 y_shft; /* unused Y-bits (as part of bpp) */ 63 + u32 cpp; /* bytes/chars per pixel */ 64 + u32 slot_w; /* width of each slot (in pixels) */ 65 + u32 slot_h; /* height of each slot (in pixels) */ 66 66 } geom[TILFMT_NFORMATS] = { 67 67 [TILFMT_8BIT] = GEOM(0, 0, 1), 68 68 [TILFMT_16BIT] = GEOM(0, 1, 2), ··· 72 72 73 73 74 74 /* lookup table for registers w/ per-engine instances */ 75 - static const uint32_t reg[][4] = { 75 + static const u32 reg[][4] = { 76 76 [PAT_STATUS] = {DMM_PAT_STATUS__0, DMM_PAT_STATUS__1, 77 77 DMM_PAT_STATUS__2, DMM_PAT_STATUS__3}, 78 78 [PAT_DESCR] = {DMM_PAT_DESCR__0, DMM_PAT_DESCR__1, ··· 111 111 } 112 112 113 113 /* check status and spin until wait_mask comes true */ 114 - static int wait_status(struct refill_engine *engine, uint32_t wait_mask) 114 + static int wait_status(struct refill_engine *engine, u32 wait_mask) 115 115 { 116 116 struct dmm *dmm = engine->dmm; 117 - uint32_t r = 0, err, i; 117 + u32 r = 0, err, i; 118 118 119 119 i = DMM_FIXED_RETRY_COUNT; 120 120 while (true) { ··· 158 158 static irqreturn_t omap_dmm_irq_handler(int irq, void *arg) 159 159 { 160 160 struct dmm *dmm = arg; 161 - uint32_t status = dmm_read(dmm, DMM_PAT_IRQSTATUS); 161 + u32 status = dmm_read(dmm, DMM_PAT_IRQSTATUS); 162 162 int i; 163 163 164 164 /* ack IRQ */ ··· 226 226 * corresponding slot is cleared (ie. 
dummy_pa is programmed) 227 227 */ 228 228 static void dmm_txn_append(struct dmm_txn *txn, struct pat_area *area, 229 - struct page **pages, uint32_t npages, uint32_t roll) 229 + struct page **pages, u32 npages, u32 roll) 230 230 { 231 231 dma_addr_t pat_pa = 0, data_pa = 0; 232 - uint32_t *data; 232 + u32 *data; 233 233 struct pat *pat; 234 234 struct refill_engine *engine = txn->engine_handle; 235 235 int columns = (1 + area->x1 - area->x0); ··· 239 239 pat = alloc_dma(txn, sizeof(*pat), &pat_pa); 240 240 241 241 if (txn->last_pat) 242 - txn->last_pat->next_pa = (uint32_t)pat_pa; 242 + txn->last_pat->next_pa = (u32)pat_pa; 243 243 244 244 pat->area = *area; 245 245 ··· 330 330 * DMM programming 331 331 */ 332 332 static int fill(struct tcm_area *area, struct page **pages, 333 - uint32_t npages, uint32_t roll, bool wait) 333 + u32 npages, u32 roll, bool wait) 334 334 { 335 335 int ret = 0; 336 336 struct tcm_area slice, area_s; ··· 378 378 /* note: slots for which pages[i] == NULL are filled w/ dummy page 379 379 */ 380 380 int tiler_pin(struct tiler_block *block, struct page **pages, 381 - uint32_t npages, uint32_t roll, bool wait) 381 + u32 npages, u32 roll, bool wait) 382 382 { 383 383 int ret; 384 384 ··· 398 398 /* 399 399 * Reserve/release 400 400 */ 401 - struct tiler_block *tiler_reserve_2d(enum tiler_fmt fmt, uint16_t w, 402 - uint16_t h, uint16_t align) 401 + struct tiler_block *tiler_reserve_2d(enum tiler_fmt fmt, u16 w, 402 + u16 h, u16 align) 403 403 { 404 404 struct tiler_block *block = kzalloc(sizeof(*block), GFP_KERNEL); 405 405 u32 min_align = 128; ··· 542 542 block->area.p0.y * geom[block->fmt].slot_h); 543 543 } 544 544 545 - dma_addr_t tiler_tsptr(struct tiler_block *block, uint32_t orient, 546 - uint32_t x, uint32_t y) 545 + dma_addr_t tiler_tsptr(struct tiler_block *block, u32 orient, 546 + u32 x, u32 y) 547 547 { 548 548 struct tcm_pt *p = &block->area.p0; 549 549 BUG_ON(!validfmt(block->fmt)); ··· 553 553 (p->y * geom[block->fmt].slot_h) + 
y); 554 554 } 555 555 556 - void tiler_align(enum tiler_fmt fmt, uint16_t *w, uint16_t *h) 556 + void tiler_align(enum tiler_fmt fmt, u16 *w, u16 *h) 557 557 { 558 558 BUG_ON(!validfmt(fmt)); 559 559 *w = round_up(*w, geom[fmt].slot_w); 560 560 *h = round_up(*h, geom[fmt].slot_h); 561 561 } 562 562 563 - uint32_t tiler_stride(enum tiler_fmt fmt, uint32_t orient) 563 + u32 tiler_stride(enum tiler_fmt fmt, u32 orient) 564 564 { 565 565 BUG_ON(!validfmt(fmt)); 566 566 ··· 570 570 return 1 << (CONT_WIDTH_BITS + geom[fmt].y_shft); 571 571 } 572 572 573 - size_t tiler_size(enum tiler_fmt fmt, uint16_t w, uint16_t h) 573 + size_t tiler_size(enum tiler_fmt fmt, u16 w, u16 h) 574 574 { 575 575 tiler_align(fmt, &w, &h); 576 576 return geom[fmt].cpp * w * h; 577 577 } 578 578 579 - size_t tiler_vsize(enum tiler_fmt fmt, uint16_t w, uint16_t h) 579 + size_t tiler_vsize(enum tiler_fmt fmt, u16 w, u16 h) 580 580 { 581 581 BUG_ON(!validfmt(fmt)); 582 582 return round_up(geom[fmt].cpp * w, PAGE_SIZE) * h; 583 583 } 584 584 585 - uint32_t tiler_get_cpu_cache_flags(void) 585 + u32 tiler_get_cpu_cache_flags(void) 586 586 { 587 587 return omap_dmm->plat_data->cpu_cache_flags; 588 588 }
+11 -11
drivers/gpu/drm/omapdrm/omap_dmm_tiler.h
··· 88 88 89 89 /* pin/unpin */ 90 90 int tiler_pin(struct tiler_block *block, struct page **pages, 91 - uint32_t npages, uint32_t roll, bool wait); 91 + u32 npages, u32 roll, bool wait); 92 92 int tiler_unpin(struct tiler_block *block); 93 93 94 94 /* reserve/release */ 95 - struct tiler_block *tiler_reserve_2d(enum tiler_fmt fmt, uint16_t w, uint16_t h, 96 - uint16_t align); 95 + struct tiler_block *tiler_reserve_2d(enum tiler_fmt fmt, u16 w, u16 h, 96 + u16 align); 97 97 struct tiler_block *tiler_reserve_1d(size_t size); 98 98 int tiler_release(struct tiler_block *block); 99 99 100 100 /* utilities */ 101 101 dma_addr_t tiler_ssptr(struct tiler_block *block); 102 - dma_addr_t tiler_tsptr(struct tiler_block *block, uint32_t orient, 103 - uint32_t x, uint32_t y); 104 - uint32_t tiler_stride(enum tiler_fmt fmt, uint32_t orient); 105 - size_t tiler_size(enum tiler_fmt fmt, uint16_t w, uint16_t h); 106 - size_t tiler_vsize(enum tiler_fmt fmt, uint16_t w, uint16_t h); 107 - void tiler_align(enum tiler_fmt fmt, uint16_t *w, uint16_t *h); 108 - uint32_t tiler_get_cpu_cache_flags(void); 102 + dma_addr_t tiler_tsptr(struct tiler_block *block, u32 orient, 103 + u32 x, u32 y); 104 + u32 tiler_stride(enum tiler_fmt fmt, u32 orient); 105 + size_t tiler_size(enum tiler_fmt fmt, u16 w, u16 h); 106 + size_t tiler_vsize(enum tiler_fmt fmt, u16 w, u16 h); 107 + void tiler_align(enum tiler_fmt fmt, u16 *w, u16 *h); 108 + u32 tiler_get_cpu_cache_flags(void); 109 109 bool dmm_is_available(void); 110 110 111 111 extern struct platform_driver omap_dmm_driver; 112 112 113 113 /* GEM bo flags -> tiler fmt */ 114 - static inline enum tiler_fmt gem2fmt(uint32_t flags) 114 + static inline enum tiler_fmt gem2fmt(u32 flags) 115 115 { 116 116 switch (flags & OMAP_BO_TILED) { 117 117 case OMAP_BO_TILED_8:
+2 -2
drivers/gpu/drm/omapdrm/omap_drv.h
··· 46 46 struct omap_drm_usergart; 47 47 48 48 struct omap_drm_private { 49 - uint32_t omaprev; 49 + u32 omaprev; 50 50 51 51 const struct dispc_ops *dispc_ops; 52 52 ··· 81 81 /* irq handling: */ 82 82 spinlock_t wait_lock; /* protects the wait_list */ 83 83 struct list_head wait_list; /* list of omap_irq_wait */ 84 - uint32_t irq_mask; /* enabled irqs in addition to wait_list */ 84 + u32 irq_mask; /* enabled irqs in addition to wait_list */ 85 85 86 86 /* memory bandwidth limit if it is needed on the platform */ 87 87 unsigned int max_bandwidth;
+9 -9
drivers/gpu/drm/omapdrm/omap_fb.c
··· 52 52 /* per-plane info for the fb: */ 53 53 struct plane { 54 54 struct drm_gem_object *bo; 55 - uint32_t pitch; 56 - uint32_t offset; 55 + u32 pitch; 56 + u32 offset; 57 57 dma_addr_t dma_addr; 58 58 }; 59 59 ··· 100 100 .destroy = omap_framebuffer_destroy, 101 101 }; 102 102 103 - static uint32_t get_linear_addr(struct plane *plane, 103 + static u32 get_linear_addr(struct plane *plane, 104 104 const struct drm_format_info *format, int n, int x, int y) 105 105 { 106 - uint32_t offset; 106 + u32 offset; 107 107 108 108 offset = plane->offset 109 109 + (x * format->cpp[n] / (n == 0 ? 1 : format->hsub)) ··· 121 121 } 122 122 123 123 /* Note: DRM rotates counter-clockwise, TILER & DSS rotates clockwise */ 124 - static uint32_t drm_rotation_to_tiler(unsigned int drm_rot) 124 + static u32 drm_rotation_to_tiler(unsigned int drm_rot) 125 125 { 126 - uint32_t orient; 126 + u32 orient; 127 127 128 128 switch (drm_rot & DRM_MODE_ROTATE_MASK) { 129 129 default: ··· 158 158 struct omap_framebuffer *omap_fb = to_omap_framebuffer(fb); 159 159 const struct drm_format_info *format = omap_fb->format; 160 160 struct plane *plane = &omap_fb->planes[0]; 161 - uint32_t x, y, orient = 0; 161 + u32 x, y, orient = 0; 162 162 163 163 info->fourcc = fb->format->format; 164 164 ··· 177 177 y = state->src_y >> 16; 178 178 179 179 if (omap_gem_flags(plane->bo) & OMAP_BO_TILED) { 180 - uint32_t w = state->src_w >> 16; 181 - uint32_t h = state->src_h >> 16; 180 + u32 w = state->src_w >> 16; 181 + u32 h = state->src_h >> 16; 182 182 183 183 orient = drm_rotation_to_tiler(state->rotation); 184 184
+22 -19
drivers/gpu/drm/omapdrm/omap_gem.c
··· 39 39 40 40 struct list_head mm_list; 41 41 42 - uint32_t flags; 42 + u32 flags; 43 43 44 44 /** width/height for tiled formats (rounded up to slot boundaries) */ 45 - uint16_t width, height; 45 + u16 width, height; 46 46 47 47 /** roll applied when mapping to DMM */ 48 - uint32_t roll; 48 + u32 roll; 49 49 50 50 /** 51 51 * dma_addr contains the buffer DMA address. It is valid for ··· 73 73 /** 74 74 * # of users of dma_addr 75 75 */ 76 - uint32_t dma_addr_cnt; 76 + u32 dma_addr_cnt; 77 77 78 78 /** 79 79 * If the buffer has been imported from a dmabuf the OMAP_DB_DMABUF flag ··· 137 137 */ 138 138 139 139 /** get mmap offset */ 140 - static uint64_t mmap_offset(struct drm_gem_object *obj) 140 + static u64 mmap_offset(struct drm_gem_object *obj) 141 141 { 142 142 struct drm_device *dev = obj->dev; 143 143 int ret; ··· 331 331 } 332 332 333 333 /* get buffer flags */ 334 - uint32_t omap_gem_flags(struct drm_gem_object *obj) 334 + u32 omap_gem_flags(struct drm_gem_object *obj) 335 335 { 336 336 return to_omap_bo(obj)->flags; 337 337 } 338 338 339 - uint64_t omap_gem_mmap_offset(struct drm_gem_object *obj) 339 + u64 omap_gem_mmap_offset(struct drm_gem_object *obj) 340 340 { 341 - uint64_t offset; 341 + u64 offset; 342 + 342 343 mutex_lock(&obj->dev->struct_mutex); 343 344 offset = mmap_offset(obj); 344 345 mutex_unlock(&obj->dev->struct_mutex); ··· 650 649 * into user memory. We don't have to do much here at the moment. 651 650 */ 652 651 int omap_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev, 653 - uint32_t handle, uint64_t *offset) 652 + u32 handle, u64 *offset) 654 653 { 655 654 struct drm_gem_object *obj; 656 655 int ret = 0; ··· 676 675 * 677 676 * Call only from non-atomic contexts. 
678 677 */ 679 - int omap_gem_roll(struct drm_gem_object *obj, uint32_t roll) 678 + int omap_gem_roll(struct drm_gem_object *obj, u32 roll) 680 679 { 681 680 struct omap_gem_object *omap_obj = to_omap_bo(obj); 682 - uint32_t npages = obj->size >> PAGE_SHIFT; 681 + u32 npages = obj->size >> PAGE_SHIFT; 683 682 int ret = 0; 684 683 685 684 if (roll > npages) { ··· 809 808 if (!is_contiguous(omap_obj) && priv->has_dmm) { 810 809 if (omap_obj->dma_addr_cnt == 0) { 811 810 struct page **pages; 812 - uint32_t npages = obj->size >> PAGE_SHIFT; 811 + u32 npages = obj->size >> PAGE_SHIFT; 813 812 enum tiler_fmt fmt = gem2fmt(omap_obj->flags); 814 813 struct tiler_block *block; 815 814 ··· 905 904 * specified orientation and x,y offset from top-left corner of buffer 906 905 * (only valid for tiled 2d buffers) 907 906 */ 908 - int omap_gem_rotated_dma_addr(struct drm_gem_object *obj, uint32_t orient, 907 + int omap_gem_rotated_dma_addr(struct drm_gem_object *obj, u32 orient, 909 908 int x, int y, dma_addr_t *dma_addr) 910 909 { 911 910 struct omap_gem_object *omap_obj = to_omap_bo(obj); ··· 922 921 } 923 922 924 923 /* Get tiler stride for the buffer (only valid for 2d tiled buffers) */ 925 - int omap_gem_tiled_stride(struct drm_gem_object *obj, uint32_t orient) 924 + int omap_gem_tiled_stride(struct drm_gem_object *obj, u32 orient) 926 925 { 927 926 struct omap_gem_object *omap_obj = to_omap_bo(obj); 928 927 int ret = -EINVAL; ··· 1004 1003 list_for_each_entry(omap_obj, &priv->obj_list, mm_list) { 1005 1004 if (omap_obj->block) { 1006 1005 struct drm_gem_object *obj = &omap_obj->base; 1007 - uint32_t npages = obj->size >> PAGE_SHIFT; 1006 + u32 npages = obj->size >> PAGE_SHIFT; 1007 + 1008 1008 WARN_ON(!omap_obj->pages); /* this can't happen */ 1009 1009 ret = tiler_pin(omap_obj->block, 1010 1010 omap_obj->pages, npages, ··· 1029 1027 void omap_gem_describe(struct drm_gem_object *obj, struct seq_file *m) 1030 1028 { 1031 1029 struct omap_gem_object *omap_obj = 
to_omap_bo(obj); 1032 - uint64_t off; 1030 + u64 off; 1033 1031 1034 1032 off = drm_vma_node_start(&obj->vma_node); 1035 1033 ··· 1117 1115 1118 1116 /* GEM buffer object constructor */ 1119 1117 struct drm_gem_object *omap_gem_new(struct drm_device *dev, 1120 - union omap_gem_size gsize, uint32_t flags) 1118 + union omap_gem_size gsize, u32 flags) 1121 1119 { 1122 1120 struct omap_drm_private *priv = dev->dev_private; 1123 1121 struct omap_gem_object *omap_obj; ··· 1282 1280 1283 1281 /* convenience method to construct a GEM buffer object, and userspace handle */ 1284 1282 int omap_gem_new_handle(struct drm_device *dev, struct drm_file *file, 1285 - union omap_gem_size gsize, uint32_t flags, uint32_t *handle) 1283 + union omap_gem_size gsize, u32 flags, u32 *handle) 1286 1284 { 1287 1285 struct drm_gem_object *obj; 1288 1286 int ret; ··· 1329 1327 1330 1328 /* reserve 4k aligned/wide regions for userspace mappings: */ 1331 1329 for (i = 0; i < ARRAY_SIZE(fmts); i++) { 1332 - uint16_t h = 1, w = PAGE_SIZE >> i; 1330 + u16 h = 1, w = PAGE_SIZE >> i; 1331 + 1333 1332 tiler_align(fmts[i], &w, &h); 1334 1333 /* note: since each region is 1 4kb page wide, and minimum 1335 1334 * number of rows, the height ends up being the same as the
+8 -8
drivers/gpu/drm/omapdrm/omap_gem.h
··· 53 53 54 54 /* GEM Object Creation and Deletion */ 55 55 struct drm_gem_object *omap_gem_new(struct drm_device *dev, 56 - union omap_gem_size gsize, uint32_t flags); 56 + union omap_gem_size gsize, u32 flags); 57 57 struct drm_gem_object *omap_gem_new_dmabuf(struct drm_device *dev, size_t size, 58 58 struct sg_table *sgt); 59 59 int omap_gem_new_handle(struct drm_device *dev, struct drm_file *file, 60 - union omap_gem_size gsize, uint32_t flags, uint32_t *handle); 60 + union omap_gem_size gsize, u32 flags, u32 *handle); 61 61 void omap_gem_free_object(struct drm_gem_object *obj); 62 62 void *omap_gem_vaddr(struct drm_gem_object *obj); 63 63 64 64 /* Dumb Buffers Interface */ 65 65 int omap_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev, 66 - uint32_t handle, uint64_t *offset); 66 + u32 handle, u64 *offset); 67 67 int omap_gem_dumb_create(struct drm_file *file, struct drm_device *dev, 68 68 struct drm_mode_create_dumb *args); 69 69 ··· 71 71 int omap_gem_mmap(struct file *filp, struct vm_area_struct *vma); 72 72 int omap_gem_mmap_obj(struct drm_gem_object *obj, 73 73 struct vm_area_struct *vma); 74 - uint64_t omap_gem_mmap_offset(struct drm_gem_object *obj); 74 + u64 omap_gem_mmap_offset(struct drm_gem_object *obj); 75 75 size_t omap_gem_mmap_size(struct drm_gem_object *obj); 76 76 77 77 /* PRIME Interface */ ··· 81 81 struct dma_buf *buffer); 82 82 83 83 int omap_gem_fault(struct vm_fault *vmf); 84 - int omap_gem_roll(struct drm_gem_object *obj, uint32_t roll); 84 + int omap_gem_roll(struct drm_gem_object *obj, u32 roll); 85 85 void omap_gem_cpu_sync_page(struct drm_gem_object *obj, int pgoff); 86 86 void omap_gem_dma_sync_buffer(struct drm_gem_object *obj, 87 87 enum dma_data_direction dir); ··· 91 91 bool remap); 92 92 int omap_gem_put_pages(struct drm_gem_object *obj); 93 93 94 - uint32_t omap_gem_flags(struct drm_gem_object *obj); 95 - int omap_gem_rotated_dma_addr(struct drm_gem_object *obj, uint32_t orient, 94 + u32 
omap_gem_flags(struct drm_gem_object *obj); 95 + int omap_gem_rotated_dma_addr(struct drm_gem_object *obj, u32 orient, 96 96 int x, int y, dma_addr_t *dma_addr); 97 - int omap_gem_tiled_stride(struct drm_gem_object *obj, uint32_t orient); 97 + int omap_gem_tiled_stride(struct drm_gem_object *obj, u32 orient); 98 98 99 99 #endif /* __OMAPDRM_GEM_H__ */
+3 -3
drivers/gpu/drm/omapdrm/omap_irq.c
··· 20 20 struct omap_irq_wait { 21 21 struct list_head node; 22 22 wait_queue_head_t wq; 23 - uint32_t irqmask; 23 + u32 irqmask; 24 24 int count; 25 25 }; 26 26 ··· 29 29 { 30 30 struct omap_drm_private *priv = dev->dev_private; 31 31 struct omap_irq_wait *wait; 32 - uint32_t irqmask = priv->irq_mask; 32 + u32 irqmask = priv->irq_mask; 33 33 34 34 assert_spin_locked(&priv->wait_lock); 35 35 ··· 48 48 } 49 49 50 50 struct omap_irq_wait * omap_irq_wait_init(struct drm_device *dev, 51 - uint32_t irqmask, int count) 51 + u32 irqmask, int count) 52 52 { 53 53 struct omap_drm_private *priv = dev->dev_private; 54 54 struct omap_irq_wait *wait = kzalloc(sizeof(*wait), GFP_KERNEL);
+1 -1
drivers/gpu/drm/omapdrm/omap_irq.h
··· 32 32 int omap_drm_irq_install(struct drm_device *dev); 33 33 34 34 struct omap_irq_wait *omap_irq_wait_init(struct drm_device *dev, 35 - uint32_t irqmask, int count); 35 + u32 irqmask, int count); 36 36 int omap_irq_wait(struct drm_device *dev, struct omap_irq_wait *wait, 37 37 unsigned long timeout); 38 38
+2 -2
drivers/gpu/drm/omapdrm/omap_plane.c
··· 201 201 static int omap_plane_atomic_set_property(struct drm_plane *plane, 202 202 struct drm_plane_state *state, 203 203 struct drm_property *property, 204 - uint64_t val) 204 + u64 val) 205 205 { 206 206 struct omap_drm_private *priv = plane->dev->dev_private; 207 207 ··· 216 216 static int omap_plane_atomic_get_property(struct drm_plane *plane, 217 217 const struct drm_plane_state *state, 218 218 struct drm_property *property, 219 - uint64_t *val) 219 + u64 *val) 220 220 { 221 221 struct omap_drm_private *priv = plane->dev->dev_private; 222 222
+6 -6
drivers/gpu/drm/omapdrm/tcm-sita.c
··· 33 33 * map ptr to bitmap 34 34 * stride slots in a row 35 35 */ 36 - static void free_slots(unsigned long pos, uint16_t w, uint16_t h, 37 - unsigned long *map, uint16_t stride) 36 + static void free_slots(unsigned long pos, u16 w, u16 h, 37 + unsigned long *map, u16 stride) 38 38 { 39 39 int i; 40 40 ··· 48 48 * map ptr to bitmap 49 49 * num_bits number of bits in bitmap 50 50 */ 51 - static int r2l_b2t_1d(uint16_t w, unsigned long *pos, unsigned long *map, 51 + static int r2l_b2t_1d(u16 w, unsigned long *pos, unsigned long *map, 52 52 size_t num_bits) 53 53 { 54 54 unsigned long search_count = 0; ··· 84 84 * num_bits = size of bitmap 85 85 * stride = bits in one row of container 86 86 */ 87 - static int l2r_t2b(uint16_t w, uint16_t h, uint16_t a, int16_t offset, 87 + static int l2r_t2b(u16 w, u16 h, u16 a, s16 offset, 88 88 unsigned long *pos, unsigned long slot_bytes, 89 89 unsigned long *map, size_t num_bits, size_t slot_stride) 90 90 { ··· 179 179 } 180 180 181 181 static s32 sita_reserve_2d(struct tcm *tcm, u16 h, u16 w, u16 align, 182 - int16_t offset, uint16_t slot_bytes, 182 + s16 offset, u16 slot_bytes, 183 183 struct tcm_area *area) 184 184 { 185 185 unsigned long pos; ··· 208 208 static s32 sita_free(struct tcm *tcm, struct tcm_area *area) 209 209 { 210 210 unsigned long pos; 211 - uint16_t w, h; 211 + u16 w, h; 212 212 213 213 pos = area->p0.x + area->p0.y * tcm->width; 214 214 if (area->is2d) {
+2 -2
drivers/gpu/drm/omapdrm/tcm.h
··· 65 65 66 66 /* function table */ 67 67 s32 (*reserve_2d)(struct tcm *tcm, u16 height, u16 width, u16 align, 68 - int16_t offset, uint16_t slot_bytes, 68 + s16 offset, u16 slot_bytes, 69 69 struct tcm_area *area); 70 70 s32 (*reserve_1d)(struct tcm *tcm, u32 slots, struct tcm_area *area); 71 71 s32 (*free)(struct tcm *tcm, struct tcm_area *area); ··· 129 129 * allocation. 130 130 */ 131 131 static inline s32 tcm_reserve_2d(struct tcm *tcm, u16 width, u16 height, 132 - u16 align, int16_t offset, uint16_t slot_bytes, 132 + u16 align, s16 offset, u16 slot_bytes, 133 133 struct tcm_area *area) 134 134 { 135 135 /* perform rudimentary error checking */