sem2mutex: drivers/char/drm/

From: Arjan van de Ven <arjan@infradead.org>

Semaphore to mutex conversion.

The conversion was generated via scripts, and the result was validated
automatically via a script as well.
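
For reference, the mapping the scripts apply to a semaphore that is only
ever used as a binary lock is roughly the following (sketched on a
hypothetical driver structure "foo", not code from this patch):

	struct mutex lock;		/* was: struct semaphore lock;    */

	mutex_init(&foo->lock);		/* was: sema_init(&foo->lock, 1); */

	mutex_lock(&foo->lock);		/* was: down(&foo->lock);         */
	/* ... critical section ... */
	mutex_unlock(&foo->lock);	/* was: up(&foo->lock);           */

Semaphores used for signalling, or initialized with a count other than
one, are not candidates for this conversion.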

Signed-off-by: Arjan van de Ven <arjan@infradead.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Dave Airlie <airlied@linux.ie>


---

 drivers/char/drm/drmP.h        |  +3  -2
 drivers/char/drm/drm_auth.c    | +10 -10
 drivers/char/drm/drm_bufs.c    | +40 -40
 drivers/char/drm/drm_context.c | +26 -26
 drivers/char/drm/drm_drv.c     |  +2  -2
 drivers/char/drm/drm_fops.c    |  +6  -6
 drivers/char/drm/drm_ioctl.c   |  +9  -9
 drivers/char/drm/drm_irq.c     |  +8  -8
 drivers/char/drm/drm_proc.c    | +14 -14
 drivers/char/drm/drm_stub.c    |  +2  -2
 drivers/char/drm/drm_vm.c      |  +6  -6
 11 files changed, 126 insertions(+), 125 deletions(-)

diff --git a/drivers/char/drm/drmP.h b/drivers/char/drm/drmP.h
--- a/drivers/char/drm/drmP.h
+++ b/drivers/char/drm/drmP.h
@@ -57,6 +57,7 @@
 #include <linux/smp_lock.h>	/* For (un)lock_kernel */
 #include <linux/mm.h>
 #include <linux/cdev.h>
+#include <linux/mutex.h>
 #if defined(__alpha__) || defined(__powerpc__)
 #include <asm/pgtable.h>	/* For pte_wrprotect */
 #endif
@@ -624,7 +623,7 @@
 	/** \name Locks */
 	/*@{ */
 	spinlock_t count_lock;		/**< For inuse, drm_device::open_count, drm_device::buf_use */
-	struct semaphore struct_sem;	/**< For others */
+	struct mutex struct_mutex;	/**< For others */
 	/*@} */
 
 	/** \name Usage Counters */
@@ -659,7 +658,7 @@
 	/*@{ */
 	drm_ctx_list_t *ctxlist;	/**< Linked list of context handles */
 	int ctx_count;			/**< Number of context handles */
-	struct semaphore ctxlist_sem;	/**< For ctxlist */
+	struct mutex ctxlist_mutex;	/**< For ctxlist */
 
 	drm_map_t **context_sareas;	/**< per-context SAREA's */
 	int max_context;

diff --git a/drivers/char/drm/drm_auth.c b/drivers/char/drm/drm_auth.c
--- a/drivers/char/drm/drm_auth.c
+++ b/drivers/char/drm/drm_auth.c
@@ -56,7 +56,7 @@
  * \param magic magic number.
  *
  * Searches in drm_device::magiclist within all files with the same hash key
- * the one with matching magic number, while holding the drm_device::struct_sem
+ * the one with matching magic number, while holding the drm_device::struct_mutex
  * lock.
  */
 static drm_file_t *drm_find_file(drm_device_t * dev, drm_magic_t magic)
@@ -65,14 +65,14 @@
 	drm_magic_entry_t *pt;
 	int hash = drm_hash_magic(magic);
 
-	down(&dev->struct_sem);
+	mutex_lock(&dev->struct_mutex);
 	for (pt = dev->magiclist[hash].head; pt; pt = pt->next) {
 		if (pt->magic == magic) {
 			retval = pt->priv;
 			break;
 		}
 	}
-	up(&dev->struct_sem);
+	mutex_unlock(&dev->struct_mutex);
 	return retval;
 }
 
@@ -85,7 +85,7 @@
  *
  * Creates a drm_magic_entry structure and appends to the linked list
  * associated the magic number hash key in drm_device::magiclist, while holding
- * the drm_device::struct_sem lock.
+ * the drm_device::struct_mutex lock.
  */
 static int drm_add_magic(drm_device_t * dev, drm_file_t * priv,
 			 drm_magic_t magic)
@@ -104,7 +104,7 @@
 	entry->priv = priv;
 	entry->next = NULL;
 
-	down(&dev->struct_sem);
+	mutex_lock(&dev->struct_mutex);
 	if (dev->magiclist[hash].tail) {
 		dev->magiclist[hash].tail->next = entry;
 		dev->magiclist[hash].tail = entry;
@@ -112,7 +112,7 @@
 		dev->magiclist[hash].head = entry;
 		dev->magiclist[hash].tail = entry;
 	}
-	up(&dev->struct_sem);
+	mutex_unlock(&dev->struct_mutex);
 
 	return 0;
 }
@@ -124,7 +124,7 @@
  * \param magic magic number.
  *
 * Searches and unlinks the entry in drm_device::magiclist with the magic
- * number hash key, while holding the drm_device::struct_sem lock.
+ * number hash key, while holding the drm_device::struct_mutex lock.
  */
 static int drm_remove_magic(drm_device_t * dev, drm_magic_t magic)
 {
@@ -135,7 +135,7 @@
 	DRM_DEBUG("%d\n", magic);
 	hash = drm_hash_magic(magic);
 
-	down(&dev->struct_sem);
+	mutex_lock(&dev->struct_mutex);
 	for (pt = dev->magiclist[hash].head; pt; prev = pt, pt = pt->next) {
 		if (pt->magic == magic) {
 			if (dev->magiclist[hash].head == pt) {
@@ -147,11 +147,11 @@
 			if (prev) {
 				prev->next = pt->next;
 			}
-			up(&dev->struct_sem);
+			mutex_unlock(&dev->struct_mutex);
 			return 0;
 		}
 	}
-	up(&dev->struct_sem);
+	mutex_unlock(&dev->struct_mutex);
 
 	drm_free(pt, sizeof(*pt), DRM_MEM_MAGIC);
 

diff --git a/drivers/char/drm/drm_bufs.c b/drivers/char/drm/drm_bufs.c
--- a/drivers/char/drm/drm_bufs.c
+++ b/drivers/char/drm/drm_bufs.c
@@ -255,14 +255,14 @@
 	memset(list, 0, sizeof(*list));
 	list->map = map;
 
-	down(&dev->struct_sem);
+	mutex_lock(&dev->struct_mutex);
 	list_add(&list->head, &dev->maplist->head);
 	/* Assign a 32-bit handle */
-	/* We do it here so that dev->struct_sem protects the increment */
+	/* We do it here so that dev->struct_mutex protects the increment */
 	list->user_token = HandleID(map->type == _DRM_SHM
 				    ? (unsigned long)map->handle
 				    : map->offset, dev);
-	up(&dev->struct_sem);
+	mutex_unlock(&dev->struct_mutex);
 
 	*maplist = list;
 	return 0;
@@ -392,9 +392,9 @@
 {
 	int ret;
 
-	down(&dev->struct_sem);
+	mutex_lock(&dev->struct_mutex);
 	ret = drm_rmmap_locked(dev, map);
-	up(&dev->struct_sem);
+	mutex_unlock(&dev->struct_mutex);
 
 	return ret;
 }
@@ -423,7 +423,7 @@
 		return -EFAULT;
 	}
 
-	down(&dev->struct_sem);
+	mutex_lock(&dev->struct_mutex);
 	list_for_each(list, &dev->maplist->head) {
 		drm_map_list_t *r_list = list_entry(list, drm_map_list_t, head);
 
@@ -439,7 +439,7 @@
 	 * find anything.
 	 */
 	if (list == (&dev->maplist->head)) {
-		up(&dev->struct_sem);
+		mutex_unlock(&dev->struct_mutex);
 		return -EINVAL;
 	}
 
@@ -448,13 +448,13 @@
 
 	/* Register and framebuffer maps are permanent */
 	if ((map->type == _DRM_REGISTERS) || (map->type == _DRM_FRAME_BUFFER)) {
-		up(&dev->struct_sem);
+		mutex_unlock(&dev->struct_mutex);
 		return 0;
 	}
 
 	ret = drm_rmmap_locked(dev, map);
 
-	up(&dev->struct_sem);
+	mutex_unlock(&dev->struct_mutex);
 
 	return ret;
 }
@@ -566,16 +566,16 @@
 	atomic_inc(&dev->buf_alloc);
 	spin_unlock(&dev->count_lock);
 
-	down(&dev->struct_sem);
+	mutex_lock(&dev->struct_mutex);
 	entry = &dma->bufs[order];
 	if (entry->buf_count) {
-		up(&dev->struct_sem);
+		mutex_unlock(&dev->struct_mutex);
 		atomic_dec(&dev->buf_alloc);
 		return -ENOMEM;	/* May only call once for each order */
 	}
 
 	if (count < 0 || count > 4096) {
-		up(&dev->struct_sem);
+		mutex_unlock(&dev->struct_mutex);
 		atomic_dec(&dev->buf_alloc);
 		return -EINVAL;
 	}
@@ -583,7 +583,7 @@
 	entry->buflist = drm_alloc(count * sizeof(*entry->buflist),
 				   DRM_MEM_BUFS);
 	if (!entry->buflist) {
-		up(&dev->struct_sem);
+		mutex_unlock(&dev->struct_mutex);
 		atomic_dec(&dev->buf_alloc);
 		return -ENOMEM;
 	}
@@ -616,7 +616,7 @@
 			/* Set count correctly so we free the proper amount. */
 			entry->buf_count = count;
 			drm_cleanup_buf_error(dev, entry);
-			up(&dev->struct_sem);
+			mutex_unlock(&dev->struct_mutex);
 			atomic_dec(&dev->buf_alloc);
 			return -ENOMEM;
 		}
@@ -638,7 +638,7 @@
 	if (!temp_buflist) {
 		/* Free the entry because it isn't valid */
 		drm_cleanup_buf_error(dev, entry);
-		up(&dev->struct_sem);
+		mutex_unlock(&dev->struct_mutex);
 		atomic_dec(&dev->buf_alloc);
 		return -ENOMEM;
 	}
@@ -656,7 +656,7 @@
 	DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
 	DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);
 
-	up(&dev->struct_sem);
+	mutex_unlock(&dev->struct_mutex);
 
 	request->count = entry->buf_count;
 	request->size = size;
@@ -722,16 +722,16 @@
 	atomic_inc(&dev->buf_alloc);
 	spin_unlock(&dev->count_lock);
 
-	down(&dev->struct_sem);
+	mutex_lock(&dev->struct_mutex);
 	entry = &dma->bufs[order];
 	if (entry->buf_count) {
-		up(&dev->struct_sem);
+		mutex_unlock(&dev->struct_mutex);
 		atomic_dec(&dev->buf_alloc);
 		return -ENOMEM;	/* May only call once for each order */
 	}
 
 	if (count < 0 || count > 4096) {
-		up(&dev->struct_sem);
+		mutex_unlock(&dev->struct_mutex);
 		atomic_dec(&dev->buf_alloc);
 		return -EINVAL;
 	}
@@ -739,7 +739,7 @@
 	entry->buflist = drm_alloc(count * sizeof(*entry->buflist),
 				   DRM_MEM_BUFS);
 	if (!entry->buflist) {
-		up(&dev->struct_sem);
+		mutex_unlock(&dev->struct_mutex);
 		atomic_dec(&dev->buf_alloc);
 		return -ENOMEM;
 	}
@@ -750,7 +750,7 @@
 	if (!entry->seglist) {
 		drm_free(entry->buflist,
 			 count * sizeof(*entry->buflist), DRM_MEM_BUFS);
-		up(&dev->struct_sem);
+		mutex_unlock(&dev->struct_mutex);
 		atomic_dec(&dev->buf_alloc);
 		return -ENOMEM;
 	}
@@ -766,7 +766,7 @@
 			 count * sizeof(*entry->buflist), DRM_MEM_BUFS);
 		drm_free(entry->seglist,
 			 count * sizeof(*entry->seglist), DRM_MEM_SEGS);
-		up(&dev->struct_sem);
+		mutex_unlock(&dev->struct_mutex);
 		atomic_dec(&dev->buf_alloc);
 		return -ENOMEM;
 	}
@@ -790,7 +790,7 @@
 			drm_free(temp_pagelist,
 				 (dma->page_count + (count << page_order))
 				 * sizeof(*dma->pagelist), DRM_MEM_PAGES);
-			up(&dev->struct_sem);
+			mutex_unlock(&dev->struct_mutex);
 			atomic_dec(&dev->buf_alloc);
 			return -ENOMEM;
 		}
@@ -831,7 +831,7 @@
 					  (count << page_order))
 					 * sizeof(*dma->pagelist),
 					 DRM_MEM_PAGES);
-				up(&dev->struct_sem);
+				mutex_unlock(&dev->struct_mutex);
 				atomic_dec(&dev->buf_alloc);
 				return -ENOMEM;
 			}
@@ -853,7 +853,7 @@
 		drm_free(temp_pagelist,
 			 (dma->page_count + (count << page_order))
 			 * sizeof(*dma->pagelist), DRM_MEM_PAGES);
-		up(&dev->struct_sem);
+		mutex_unlock(&dev->struct_mutex);
 		atomic_dec(&dev->buf_alloc);
 		return -ENOMEM;
 	}
@@ -878,7 +878,7 @@
 	dma->page_count += entry->seg_count << page_order;
 	dma->byte_count += PAGE_SIZE * (entry->seg_count << page_order);
 
-	up(&dev->struct_sem);
+	mutex_unlock(&dev->struct_mutex);
 
 	request->count = entry->buf_count;
 	request->size = size;
@@ -948,16 +948,16 @@
 	atomic_inc(&dev->buf_alloc);
 	spin_unlock(&dev->count_lock);
 
-	down(&dev->struct_sem);
+	mutex_lock(&dev->struct_mutex);
 	entry = &dma->bufs[order];
 	if (entry->buf_count) {
-		up(&dev->struct_sem);
+		mutex_unlock(&dev->struct_mutex);
 		atomic_dec(&dev->buf_alloc);
 		return -ENOMEM;	/* May only call once for each order */
 	}
 
 	if (count < 0 || count > 4096) {
-		up(&dev->struct_sem);
+		mutex_unlock(&dev->struct_mutex);
 		atomic_dec(&dev->buf_alloc);
 		return -EINVAL;
 	}
@@ -965,7 +965,7 @@
 	entry->buflist = drm_alloc(count * sizeof(*entry->buflist),
 				   DRM_MEM_BUFS);
 	if (!entry->buflist) {
-		up(&dev->struct_sem);
+		mutex_unlock(&dev->struct_mutex);
 		atomic_dec(&dev->buf_alloc);
 		return -ENOMEM;
 	}
@@ -999,7 +999,7 @@
 			/* Set count correctly so we free the proper amount. */
 			entry->buf_count = count;
 			drm_cleanup_buf_error(dev, entry);
-			up(&dev->struct_sem);
+			mutex_unlock(&dev->struct_mutex);
 			atomic_dec(&dev->buf_alloc);
 			return -ENOMEM;
 		}
@@ -1022,7 +1022,7 @@
 	if (!temp_buflist) {
 		/* Free the entry because it isn't valid */
 		drm_cleanup_buf_error(dev, entry);
-		up(&dev->struct_sem);
+		mutex_unlock(&dev->struct_mutex);
 		atomic_dec(&dev->buf_alloc);
 		return -ENOMEM;
 	}
@@ -1040,7 +1040,7 @@
 	DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
 	DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);
 
-	up(&dev->struct_sem);
+	mutex_unlock(&dev->struct_mutex);
 
 	request->count = entry->buf_count;
 	request->size = size;
@@ -1110,16 +1110,16 @@
 	atomic_inc(&dev->buf_alloc);
 	spin_unlock(&dev->count_lock);
 
-	down(&dev->struct_sem);
+	mutex_lock(&dev->struct_mutex);
 	entry = &dma->bufs[order];
 	if (entry->buf_count) {
-		up(&dev->struct_sem);
+		mutex_unlock(&dev->struct_mutex);
 		atomic_dec(&dev->buf_alloc);
 		return -ENOMEM;	/* May only call once for each order */
 	}
 
 	if (count < 0 || count > 4096) {
-		up(&dev->struct_sem);
+		mutex_unlock(&dev->struct_mutex);
 		atomic_dec(&dev->buf_alloc);
 		return -EINVAL;
 	}
@@ -1127,7 +1127,7 @@
 	entry->buflist = drm_alloc(count * sizeof(*entry->buflist),
 				   DRM_MEM_BUFS);
 	if (!entry->buflist) {
-		up(&dev->struct_sem);
+		mutex_unlock(&dev->struct_mutex);
 		atomic_dec(&dev->buf_alloc);
 		return -ENOMEM;
 	}
@@ -1160,7 +1160,7 @@
 			/* Set count correctly so we free the proper amount. */
 			entry->buf_count = count;
 			drm_cleanup_buf_error(dev, entry);
-			up(&dev->struct_sem);
+			mutex_unlock(&dev->struct_mutex);
 			atomic_dec(&dev->buf_alloc);
 			return -ENOMEM;
 		}
@@ -1182,7 +1182,7 @@
 	if (!temp_buflist) {
 		/* Free the entry because it isn't valid */
 		drm_cleanup_buf_error(dev, entry);
-		up(&dev->struct_sem);
+		mutex_unlock(&dev->struct_mutex);
 		atomic_dec(&dev->buf_alloc);
 		return -ENOMEM;
 	}
@@ -1200,7 +1200,7 @@
 	DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
 	DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);
 
-	up(&dev->struct_sem);
+	mutex_unlock(&dev->struct_mutex);
 
 	request->count = entry->buf_count;
 	request->size = size;

diff --git a/drivers/char/drm/drm_context.c b/drivers/char/drm/drm_context.c
--- a/drivers/char/drm/drm_context.c
+++ b/drivers/char/drm/drm_context.c
@@ -53,7 +53,7 @@
  * \param ctx_handle context handle.
  *
  * Clears the bit specified by \p ctx_handle in drm_device::ctx_bitmap and the entry
- * in drm_device::context_sareas, while holding the drm_device::struct_sem
+ * in drm_device::context_sareas, while holding the drm_device::struct_mutex
  * lock.
  */
 void drm_ctxbitmap_free(drm_device_t * dev, int ctx_handle)
@@ -64,10 +64,10 @@
 		goto failed;
 
 	if (ctx_handle < DRM_MAX_CTXBITMAP) {
-		down(&dev->struct_sem);
+		mutex_lock(&dev->struct_mutex);
 		clear_bit(ctx_handle, dev->ctx_bitmap);
 		dev->context_sareas[ctx_handle] = NULL;
-		up(&dev->struct_sem);
+		mutex_unlock(&dev->struct_mutex);
 		return;
 	}
       failed:
@@ -83,7 +83,7 @@
  *
  * Find the first zero bit in drm_device::ctx_bitmap and (re)allocates
  * drm_device::context_sareas to accommodate the new entry while holding the
- * drm_device::struct_sem lock.
+ * drm_device::struct_mutex lock.
  */
 static int drm_ctxbitmap_next(drm_device_t * dev)
 {
@@ -92,7 +92,7 @@
 	if (!dev->ctx_bitmap)
 		return -1;
 
-	down(&dev->struct_sem);
+	mutex_lock(&dev->struct_mutex);
 	bit = find_first_zero_bit(dev->ctx_bitmap, DRM_MAX_CTXBITMAP);
 	if (bit < DRM_MAX_CTXBITMAP) {
 		set_bit(bit, dev->ctx_bitmap);
@@ -113,7 +113,7 @@
 						 DRM_MEM_MAPS);
 			if (!ctx_sareas) {
 				clear_bit(bit, dev->ctx_bitmap);
-				up(&dev->struct_sem);
+				mutex_unlock(&dev->struct_mutex);
 				return -1;
 			}
 			dev->context_sareas = ctx_sareas;
@@ -126,15 +126,15 @@
 				      DRM_MEM_MAPS);
 			if (!dev->context_sareas) {
 				clear_bit(bit, dev->ctx_bitmap);
-				up(&dev->struct_sem);
+				mutex_unlock(&dev->struct_mutex);
 				return -1;
 			}
 			dev->context_sareas[bit] = NULL;
 		}
 	}
-	up(&dev->struct_sem);
+	mutex_unlock(&dev->struct_mutex);
 	return bit;
 }
-	up(&dev->struct_sem);
+	mutex_unlock(&dev->struct_mutex);
 	return -1;
 }
@@ -145,24 +145,24 @@
 * \param dev DRM device.
 *
 * Allocates and initialize drm_device::ctx_bitmap and drm_device::context_sareas, while holding
- * the drm_device::struct_sem lock.
+ * the drm_device::struct_mutex lock.
 */
 int drm_ctxbitmap_init(drm_device_t * dev)
 {
 	int i;
 	int temp;
 
-	down(&dev->struct_sem);
+	mutex_lock(&dev->struct_mutex);
 	dev->ctx_bitmap = (unsigned long *)drm_alloc(PAGE_SIZE,
 						     DRM_MEM_CTXBITMAP);
 	if (dev->ctx_bitmap == NULL) {
-		up(&dev->struct_sem);
+		mutex_unlock(&dev->struct_mutex);
 		return -ENOMEM;
 	}
 	memset((void *)dev->ctx_bitmap, 0, PAGE_SIZE);
 	dev->context_sareas = NULL;
 	dev->max_context = -1;
-	up(&dev->struct_sem);
+	mutex_unlock(&dev->struct_mutex);
 
 	for (i = 0; i < DRM_RESERVED_CONTEXTS; i++) {
 		temp = drm_ctxbitmap_next(dev);
@@ -178,17 +178,17 @@
 * \param dev DRM device.
 *
 * Frees drm_device::ctx_bitmap and drm_device::context_sareas, while holding
- * the drm_device::struct_sem lock.
+ * the drm_device::struct_mutex lock.
 */
 void drm_ctxbitmap_cleanup(drm_device_t * dev)
 {
-	down(&dev->struct_sem);
+	mutex_lock(&dev->struct_mutex);
 	if (dev->context_sareas)
 		drm_free(dev->context_sareas,
 			 sizeof(*dev->context_sareas) *
 			 dev->max_context, DRM_MEM_MAPS);
 	drm_free((void *)dev->ctx_bitmap, PAGE_SIZE, DRM_MEM_CTXBITMAP);
-	up(&dev->struct_sem);
+	mutex_unlock(&dev->struct_mutex);
 }
 
 /*@}*/
@@ -222,15 +222,15 @@
 	if (copy_from_user(&request, argp, sizeof(request)))
 		return -EFAULT;
 
-	down(&dev->struct_sem);
+	mutex_lock(&dev->struct_mutex);
 	if (dev->max_context < 0
 	    || request.ctx_id >= (unsigned)dev->max_context) {
-		up(&dev->struct_sem);
+		mutex_unlock(&dev->struct_mutex);
 		return -EINVAL;
 	}
 
 	map = dev->context_sareas[request.ctx_id];
-	up(&dev->struct_sem);
+	mutex_unlock(&dev->struct_mutex);
 
 	request.handle = NULL;
 	list_for_each_entry(_entry, &dev->maplist->head, head) {
@@ -274,7 +274,7 @@
 	    (drm_ctx_priv_map_t __user *) arg, sizeof(request)))
 		return -EFAULT;
 
-	down(&dev->struct_sem);
+	mutex_lock(&dev->struct_mutex);
 	list_for_each(list, &dev->maplist->head) {
 		r_list = list_entry(list, drm_map_list_t, head);
 		if (r_list->map
@@ -282,7 +282,7 @@
 			goto found;
 	}
       bad:
-	up(&dev->struct_sem);
+	mutex_unlock(&dev->struct_mutex);
 	return -EINVAL;
 
       found:
@@ -294,7 +294,7 @@
 	if (request.ctx_id >= (unsigned)dev->max_context)
 		goto bad;
 	dev->context_sareas[request.ctx_id] = map;
-	up(&dev->struct_sem);
+	mutex_unlock(&dev->struct_mutex);
 	return 0;
 }
 
@@ -448,10 +448,10 @@
 	ctx_entry->handle = ctx.handle;
 	ctx_entry->tag = priv;
 
-	down(&dev->ctxlist_sem);
+	mutex_lock(&dev->ctxlist_mutex);
 	list_add(&ctx_entry->head, &dev->ctxlist->head);
 	++dev->ctx_count;
-	up(&dev->ctxlist_sem);
+	mutex_unlock(&dev->ctxlist_mutex);
 
 	if (copy_to_user(argp, &ctx, sizeof(ctx)))
 		return -EFAULT;
@@ -574,7 +574,7 @@
 		drm_ctxbitmap_free(dev, ctx.handle);
 	}
 
-	down(&dev->ctxlist_sem);
+	mutex_lock(&dev->ctxlist_mutex);
 	if (!list_empty(&dev->ctxlist->head)) {
 		drm_ctx_list_t *pos, *n;
 
@@ -586,7 +586,7 @@
 			}
 		}
 	}
-	up(&dev->ctxlist_sem);
+	mutex_unlock(&dev->ctxlist_mutex);
 
 	return 0;
 }

diff --git a/drivers/char/drm/drm_drv.c b/drivers/char/drm/drm_drv.c
--- a/drivers/char/drm/drm_drv.c
+++ b/drivers/char/drm/drm_drv.c
@@ -151,7 +151,7 @@
 	if (dev->irq_enabled)
 		drm_irq_uninstall(dev);
 
-	down(&dev->struct_sem);
+	mutex_lock(&dev->struct_mutex);
 	del_timer(&dev->timer);
 
 	/* Clear pid list */
@@ -231,7 +231,7 @@
 		dev->lock.filp = NULL;
 		wake_up_interruptible(&dev->lock.lock_queue);
 	}
-	up(&dev->struct_sem);
+	mutex_unlock(&dev->struct_mutex);
 
 	DRM_DEBUG("lastclose completed\n");
 	return 0;

diff --git a/drivers/char/drm/drm_fops.c b/drivers/char/drm/drm_fops.c
--- a/drivers/char/drm/drm_fops.c
+++ b/drivers/char/drm/drm_fops.c
@@ -262,7 +262,7 @@
 		goto out_free;
 	}
 
-	down(&dev->struct_sem);
+	mutex_lock(&dev->struct_mutex);
 	if (!dev->file_last) {
 		priv->next = NULL;
 		priv->prev = NULL;
@@ -276,7 +276,7 @@
 		dev->file_last->next = priv;
 		dev->file_last = priv;
 	}
-	up(&dev->struct_sem);
+	mutex_unlock(&dev->struct_mutex);
 
 #ifdef __alpha__
 	/*
@@ -413,7 +413,7 @@
 
 	drm_fasync(-1, filp, 0);
 
-	down(&dev->ctxlist_sem);
+	mutex_lock(&dev->ctxlist_mutex);
 	if (dev->ctxlist && (!list_empty(&dev->ctxlist->head))) {
 		drm_ctx_list_t *pos, *n;
 
@@ -432,9 +432,9 @@
 			}
 		}
 	}
-	up(&dev->ctxlist_sem);
+	mutex_unlock(&dev->ctxlist_mutex);
 
-	down(&dev->struct_sem);
+	mutex_lock(&dev->struct_mutex);
 	if (priv->remove_auth_on_close == 1) {
 		drm_file_t *temp = dev->file_first;
 		while (temp) {
@@ -452,7 +452,7 @@
 	} else {
 		dev->file_last = priv->prev;
 	}
-	up(&dev->struct_sem);
+	mutex_unlock(&dev->struct_mutex);
 
 	if (dev->driver->postclose)
 		dev->driver->postclose(dev, priv);

diff --git a/drivers/char/drm/drm_ioctl.c b/drivers/char/drm/drm_ioctl.c
--- a/drivers/char/drm/drm_ioctl.c
+++ b/drivers/char/drm/drm_ioctl.c
@@ -194,9 +194,9 @@
 		return -EFAULT;
 	idx = map.offset;
 
-	down(&dev->struct_sem);
+	mutex_lock(&dev->struct_mutex);
 	if (idx < 0) {
-		up(&dev->struct_sem);
+		mutex_unlock(&dev->struct_mutex);
 		return -EINVAL;
 	}
 
@@ -209,7 +209,7 @@
 		i++;
 	}
 	if (!r_list || !r_list->map) {
-		up(&dev->struct_sem);
+		mutex_unlock(&dev->struct_mutex);
 		return -EINVAL;
 	}
 
@@ -219,7 +219,7 @@
 	map.flags = r_list->map->flags;
 	map.handle = (void *)(unsigned long)r_list->user_token;
 	map.mtrr = r_list->map->mtrr;
-	up(&dev->struct_sem);
+	mutex_unlock(&dev->struct_mutex);
 
 	if (copy_to_user(argp, &map, sizeof(map)))
 		return -EFAULT;
@@ -253,11 +253,11 @@
 	if (copy_from_user(&client, argp, sizeof(client)))
 		return -EFAULT;
 	idx = client.idx;
-	down(&dev->struct_sem);
+	mutex_lock(&dev->struct_mutex);
 	for (i = 0, pt = dev->file_first; i < idx && pt; i++, pt = pt->next) ;
 
 	if (!pt) {
-		up(&dev->struct_sem);
+		mutex_unlock(&dev->struct_mutex);
 		return -EINVAL;
 	}
 	client.auth = pt->authenticated;
@@ -265,7 +265,7 @@
 	client.uid = pt->uid;
 	client.magic = pt->magic;
 	client.iocs = pt->ioctl_count;
-	up(&dev->struct_sem);
+	mutex_unlock(&dev->struct_mutex);
 
 	if (copy_to_user(argp, &client, sizeof(client)))
 		return -EFAULT;
@@ -292,7 +292,7 @@
 
 	memset(&stats, 0, sizeof(stats));
 
-	down(&dev->struct_sem);
+	mutex_lock(&dev->struct_mutex);
 
 	for (i = 0; i < dev->counters; i++) {
 		if (dev->types[i] == _DRM_STAT_LOCK)
@@ -305,7 +305,7 @@
 
 	stats.count = dev->counters;
 
-	up(&dev->struct_sem);
+	mutex_unlock(&dev->struct_mutex);
 
 	if (copy_to_user((drm_stats_t __user *) arg, &stats, sizeof(stats)))
 		return -EFAULT;

diff --git a/drivers/char/drm/drm_irq.c b/drivers/char/drm/drm_irq.c
--- a/drivers/char/drm/drm_irq.c
+++ b/drivers/char/drm/drm_irq.c
@@ -98,20 +98,20 @@
 	if (dev->irq == 0)
 		return -EINVAL;
 
-	down(&dev->struct_sem);
+	mutex_lock(&dev->struct_mutex);
 
 	/* Driver must have been initialized */
 	if (!dev->dev_private) {
-		up(&dev->struct_sem);
+		mutex_unlock(&dev->struct_mutex);
 		return -EINVAL;
 	}
 
 	if (dev->irq_enabled) {
-		up(&dev->struct_sem);
+		mutex_unlock(&dev->struct_mutex);
 		return -EBUSY;
 	}
 	dev->irq_enabled = 1;
-	up(&dev->struct_sem);
+	mutex_unlock(&dev->struct_mutex);
 
 	DRM_DEBUG("%s: irq=%d\n", __FUNCTION__, dev->irq);
 
@@ -135,9 +135,9 @@
 	ret = request_irq(dev->irq, dev->driver->irq_handler,
 			  sh_flags, dev->devname, dev);
 	if (ret < 0) {
-		down(&dev->struct_sem);
+		mutex_lock(&dev->struct_mutex);
 		dev->irq_enabled = 0;
-		up(&dev->struct_sem);
+		mutex_unlock(&dev->struct_mutex);
 		return ret;
 	}
 
@@ -161,10 +161,10 @@
 	if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
 		return -EINVAL;
 
-	down(&dev->struct_sem);
+	mutex_lock(&dev->struct_mutex);
 	irq_enabled = dev->irq_enabled;
 	dev->irq_enabled = 0;
-	up(&dev->struct_sem);
+	mutex_unlock(&dev->struct_mutex);
 
 	if (!irq_enabled)
 		return -EINVAL;

diff --git a/drivers/char/drm/drm_proc.c b/drivers/char/drm/drm_proc.c
--- a/drivers/char/drm/drm_proc.c
+++ b/drivers/char/drm/drm_proc.c
@@ -258,7 +258,7 @@
 }
 
 /**
- * Simply calls _vm_info() while holding the drm_device::struct_sem lock.
+ * Simply calls _vm_info() while holding the drm_device::struct_mutex lock.
  */
 static int drm_vm_info(char *buf, char **start, off_t offset, int request,
 		       int *eof, void *data)
@@ -266,9 +266,9 @@
 	drm_device_t *dev = (drm_device_t *) data;
 	int ret;
 
-	down(&dev->struct_sem);
+	mutex_lock(&dev->struct_mutex);
 	ret = drm__vm_info(buf, start, offset, request, eof, data);
-	up(&dev->struct_sem);
+	mutex_unlock(&dev->struct_mutex);
 	return ret;
 }
 
@@ -331,7 +331,7 @@
 }
 
 /**
- * Simply calls _queues_info() while holding the drm_device::struct_sem lock.
+ * Simply calls _queues_info() while holding the drm_device::struct_mutex lock.
  */
 static int drm_queues_info(char *buf, char **start, off_t offset, int request,
 			   int *eof, void *data)
@@ -339,9 +339,9 @@
 	drm_device_t *dev = (drm_device_t *) data;
 	int ret;
 
-	down(&dev->struct_sem);
+	mutex_lock(&dev->struct_mutex);
 	ret = drm__queues_info(buf, start, offset, request, eof, data);
-	up(&dev->struct_sem);
+	mutex_unlock(&dev->struct_mutex);
 	return ret;
 }
 
@@ -403,7 +403,7 @@
 }
 
 /**
- * Simply calls _bufs_info() while holding the drm_device::struct_sem lock.
+ * Simply calls _bufs_info() while holding the drm_device::struct_mutex lock.
  */
 static int drm_bufs_info(char *buf, char **start, off_t offset, int request,
 			 int *eof, void *data)
@@ -411,9 +411,9 @@
 	drm_device_t *dev = (drm_device_t *) data;
 	int ret;
 
-	down(&dev->struct_sem);
+	mutex_lock(&dev->struct_mutex);
 	ret = drm__bufs_info(buf, start, offset, request, eof, data);
-	up(&dev->struct_sem);
+	mutex_unlock(&dev->struct_mutex);
 	return ret;
 }
 
@@ -459,7 +459,7 @@
 }
 
 /**
- * Simply calls _clients_info() while holding the drm_device::struct_sem lock.
+ * Simply calls _clients_info() while holding the drm_device::struct_mutex lock.
  */
 static int drm_clients_info(char *buf, char **start, off_t offset,
 			    int request, int *eof, void *data)
@@ -467,9 +467,9 @@
 	drm_device_t *dev = (drm_device_t *) data;
 	int ret;
 
-	down(&dev->struct_sem);
+	mutex_lock(&dev->struct_mutex);
 	ret = drm__clients_info(buf, start, offset, request, eof, data);
-	up(&dev->struct_sem);
+	mutex_unlock(&dev->struct_mutex);
 	return ret;
 }
 
@@ -540,9 +540,9 @@
 	drm_device_t *dev = (drm_device_t *) data;
 	int ret;
 
-	down(&dev->struct_sem);
+	mutex_lock(&dev->struct_mutex);
 	ret = drm__vma_info(buf, start, offset, request, eof, data);
-	up(&dev->struct_sem);
+	mutex_unlock(&dev->struct_mutex);
 	return ret;
 }
 #endif

diff --git a/drivers/char/drm/drm_stub.c b/drivers/char/drm/drm_stub.c
--- a/drivers/char/drm/drm_stub.c
+++ b/drivers/char/drm/drm_stub.c
@@ -61,8 +61,8 @@
 
 	spin_lock_init(&dev->count_lock);
 	init_timer(&dev->timer);
-	sema_init(&dev->struct_sem, 1);
-	sema_init(&dev->ctxlist_sem, 1);
+	mutex_init(&dev->struct_mutex);
+	mutex_init(&dev->ctxlist_mutex);
 
 	dev->pdev = pdev;
 

diff --git a/drivers/char/drm/drm_vm.c b/drivers/char/drm/drm_vm.c
--- a/drivers/char/drm/drm_vm.c
+++ b/drivers/char/drm/drm_vm.c
@@ -188,7 +188,7 @@
 
 	map = vma->vm_private_data;
 
-	down(&dev->struct_sem);
+	mutex_lock(&dev->struct_mutex);
 	for (pt = dev->vmalist, prev = NULL; pt; pt = next) {
 		next = pt->next;
 		if (pt->vma->vm_private_data == map)
@@ -248,7 +248,7 @@
 			drm_free(map, sizeof(*map), DRM_MEM_MAPS);
 		}
 	}
-	up(&dev->struct_sem);
+	mutex_unlock(&dev->struct_mutex);
 }
 
 /**
@@ -404,12 +404,12 @@
 
 	vma_entry = drm_alloc(sizeof(*vma_entry), DRM_MEM_VMAS);
 	if (vma_entry) {
-		down(&dev->struct_sem);
+		mutex_lock(&dev->struct_mutex);
 		vma_entry->vma = vma;
 		vma_entry->next = dev->vmalist;
 		vma_entry->pid = current->pid;
 		dev->vmalist = vma_entry;
-		up(&dev->struct_sem);
+		mutex_unlock(&dev->struct_mutex);
 	}
 }
 
@@ -431,7 +431,7 @@
 		  vma->vm_start, vma->vm_end - vma->vm_start);
 	atomic_dec(&dev->vma_count);
 
-	down(&dev->struct_sem);
+	mutex_lock(&dev->struct_mutex);
 	for (pt = dev->vmalist, prev = NULL; pt; prev = pt, pt = pt->next) {
 		if (pt->vma == vma) {
 			if (prev) {
@@ -443,7 +443,7 @@
 			break;
 		}
 	}
-	up(&dev->struct_sem);
+	mutex_unlock(&dev->struct_mutex);
 }
 
 /**