Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

staging: vc04_services: Move spinlocks to vchiq_state

The msg_queue_spinlock, quota_spinlock and bulk_waiter_spinlock
are allocated globally. Instead move them to struct vchiq_state
and initialise them in vchiq_init_state().

Signed-off-by: Umang Jain <umang.jain@ideasonboard.com>
Reviewed-by: Stefan Wahren <wahrenst@gmx.net>
Link: https://lore.kernel.org/r/20240412075743.60712-9-umang.jain@ideasonboard.com
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

Authored by Umang Jain and committed by Greg Kroah-Hartman
12cc5f92 6d0ef321

+47 -41
+8 -9
drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
··· 59 59 #define KEEPALIVE_VER 1 60 60 #define KEEPALIVE_VER_MIN KEEPALIVE_VER 61 61 62 - DEFINE_SPINLOCK(msg_queue_spinlock); 63 62 struct vchiq_state g_state; 64 63 65 64 /* ··· 984 985 * This is not a retry of the previous one. 985 986 * Cancel the signal when the transfer completes. 986 987 */ 987 - spin_lock(&bulk_waiter_spinlock); 988 + spin_lock(&service->state->bulk_waiter_spinlock); 988 989 bulk->userdata = NULL; 989 - spin_unlock(&bulk_waiter_spinlock); 990 + spin_unlock(&service->state->bulk_waiter_spinlock); 990 991 } 991 992 } 992 993 } else { ··· 1003 1004 1004 1005 if (bulk) { 1005 1006 /* Cancel the signal when the transfer completes. */ 1006 - spin_lock(&bulk_waiter_spinlock); 1007 + spin_lock(&service->state->bulk_waiter_spinlock); 1007 1008 bulk->userdata = NULL; 1008 - spin_unlock(&bulk_waiter_spinlock); 1009 + spin_unlock(&service->state->bulk_waiter_spinlock); 1009 1010 } 1010 1011 kfree(waiter); 1011 1012 } else { ··· 1126 1127 reason, header, instance, bulk_userdata); 1127 1128 1128 1129 if (header && user_service->is_vchi) { 1129 - spin_lock(&msg_queue_spinlock); 1130 + spin_lock(&service->state->msg_queue_spinlock); 1130 1131 while (user_service->msg_insert == 1131 1132 (user_service->msg_remove + MSG_QUEUE_SIZE)) { 1132 - spin_unlock(&msg_queue_spinlock); 1133 + spin_unlock(&service->state->msg_queue_spinlock); 1133 1134 DEBUG_TRACE(SERVICE_CALLBACK_LINE); 1134 1135 DEBUG_COUNT(MSG_QUEUE_FULL_COUNT); 1135 1136 dev_dbg(service->state->dev, "arm: msg queue full\n"); ··· 1166 1167 return -EINVAL; 1167 1168 } 1168 1169 DEBUG_TRACE(SERVICE_CALLBACK_LINE); 1169 - spin_lock(&msg_queue_spinlock); 1170 + spin_lock(&service->state->msg_queue_spinlock); 1170 1171 } 1171 1172 1172 1173 user_service->msg_queue[user_service->msg_insert & ··· 1185 1186 skip_completion = true; 1186 1187 } 1187 1188 1188 - spin_unlock(&msg_queue_spinlock); 1189 + spin_unlock(&service->state->msg_queue_spinlock); 1189 1190 complete(&user_service->insert_event); 1190 1191 1191 1192 header = NULL;
-1
drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.h
··· 98 98 struct vchiq_debugfs_node debugfs_node; 99 99 }; 100 100 101 - extern spinlock_t msg_queue_spinlock; 102 101 extern struct vchiq_state g_state; 103 102 104 103 extern struct vchiq_state *
+20 -19
drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c
··· 149 149 BUILD_BUG_ON_NOT_POWER_OF_2(VCHIQ_MAX_SERVICES); 150 150 } 151 151 152 - DEFINE_SPINLOCK(bulk_waiter_spinlock); 153 - static DEFINE_SPINLOCK(quota_spinlock); 154 - 155 152 static unsigned int handle_seq; 156 153 157 154 static const char *const srvstate_names[] = { ··· 721 724 struct vchiq_service_quota *quota = &state->service_quotas[port]; 722 725 int count; 723 726 724 - spin_lock(&quota_spinlock); 727 + spin_lock(&state->quota_spinlock); 725 728 count = quota->message_use_count; 726 729 if (count > 0) 727 730 quota->message_use_count = count - 1; 728 - spin_unlock(&quota_spinlock); 731 + spin_unlock(&state->quota_spinlock); 729 732 730 733 if (count == quota->message_quota) { 731 734 /* ··· 744 747 /* Set the found bit for this service */ 745 748 BITSET_SET(service_found, port); 746 749 747 - spin_lock(&quota_spinlock); 750 + spin_lock(&state->quota_spinlock); 748 751 count = quota->slot_use_count; 749 752 if (count > 0) 750 753 quota->slot_use_count = count - 1; 751 - spin_unlock(&quota_spinlock); 754 + spin_unlock(&state->quota_spinlock); 752 755 753 756 if (count > 0) { 754 757 /* ··· 834 837 if (data_found) { 835 838 int count; 836 839 837 - spin_lock(&quota_spinlock); 840 + spin_lock(&state->quota_spinlock); 838 841 count = state->data_use_count; 839 842 if (count > 0) 840 843 state->data_use_count = count - 1; 841 - spin_unlock(&quota_spinlock); 844 + spin_unlock(&state->quota_spinlock); 842 845 if (count == state->data_quota) 843 846 complete(&state->data_quota_event); 844 847 } ··· 937 940 938 941 quota = &state->service_quotas[service->localport]; 939 942 940 - spin_lock(&quota_spinlock); 943 + spin_lock(&state->quota_spinlock); 941 944 942 945 /* 943 946 * Ensure this service doesn't use more than its quota of ··· 952 955 while ((tx_end_index != state->previous_data_index) && 953 956 (state->data_use_count == state->data_quota)) { 954 957 VCHIQ_STATS_INC(state, data_stalls); 955 - spin_unlock(&quota_spinlock); 958 + spin_unlock(&state->quota_spinlock); 956 959 mutex_unlock(&state->slot_mutex); 957 960 958 961 if (wait_for_completion_interruptible(&state->data_quota_event)) 959 962 return -EAGAIN; 960 963 961 964 mutex_lock(&state->slot_mutex); 962 - spin_lock(&quota_spinlock); 965 + spin_lock(&state->quota_spinlock); 963 966 tx_end_index = SLOT_QUEUE_INDEX_FROM_POS(state->local_tx_pos + stride - 1); 964 967 if ((tx_end_index == state->previous_data_index) || 965 968 (state->data_use_count < state->data_quota)) { ··· 972 975 while ((quota->message_use_count == quota->message_quota) || 973 976 ((tx_end_index != quota->previous_tx_index) && 974 977 (quota->slot_use_count == quota->slot_quota))) { 975 - spin_unlock(&quota_spinlock); 978 + spin_unlock(&state->quota_spinlock); 976 979 dev_dbg(state->dev, 977 980 "core: %d: qm:%d %s,%zx - quota stall (msg %d, slot %d)\n", 978 981 state->id, service->localport, msg_type_str(type), size, ··· 990 993 mutex_unlock(&state->slot_mutex); 991 994 return -EHOSTDOWN; 992 995 } 993 - spin_lock(&quota_spinlock); 996 + spin_lock(&state->quota_spinlock); 994 997 tx_end_index = SLOT_QUEUE_INDEX_FROM_POS(state->local_tx_pos + stride - 1); 995 998 } 996 999 997 - spin_unlock(&quota_spinlock); 1000 + spin_unlock(&state->quota_spinlock); 998 1001 } 999 1002 1000 1003 header = reserve_space(state, stride, flags & QMFLAGS_IS_BLOCKING); ··· 1037 1040 header->data, 1038 1041 min_t(size_t, 16, callback_result)); 1039 1042 1040 - spin_lock(&quota_spinlock); 1043 + spin_lock(&state->quota_spinlock); 1041 1044 quota->message_use_count++; 1042 1045 1043 1046 tx_end_index = ··· 1063 1066 slot_use_count = 0; 1064 1067 } 1065 1068 1066 - spin_unlock(&quota_spinlock); 1069 + spin_unlock(&state->quota_spinlock); 1067 1070 1068 1071 if (slot_use_count) 1069 1072 dev_dbg(state->dev, "core: %d: qm:%d %s,%zx - slot_use->%d (hdr %p)\n", ··· 1319 1322 if (bulk->mode == VCHIQ_BULK_MODE_BLOCKING) { 1320 1323 struct bulk_waiter *waiter; 1321 1324 1322 - spin_lock(&bulk_waiter_spinlock); 1325 + spin_lock(&service->state->bulk_waiter_spinlock); 1323 1326 waiter = bulk->userdata; 1324 1327 if (waiter) { 1325 1328 waiter->actual = bulk->actual; 1326 1329 complete(&waiter->event); 1327 1330 } 1328 - spin_unlock(&bulk_waiter_spinlock); 1331 + spin_unlock(&service->state->bulk_waiter_spinlock); 1329 1332 } else if (bulk->mode == VCHIQ_BULK_MODE_CALLBACK) { 1330 1333 enum vchiq_reason reason = 1331 1334 get_bulk_reason(bulk); ··· 2165 2168 mutex_init(&state->recycle_mutex); 2166 2169 mutex_init(&state->sync_mutex); 2167 2170 mutex_init(&state->bulk_transfer_mutex); 2171 + 2172 + spin_lock_init(&state->msg_queue_spinlock); 2173 + spin_lock_init(&state->bulk_waiter_spinlock); 2174 + spin_lock_init(&state->quota_spinlock); 2168 2175 2169 2176 init_completion(&state->slot_available_event); 2170 2177 init_completion(&state->slot_remove_event);
+7
drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.h
··· 11 11 #include <linux/kthread.h> 12 12 #include <linux/kref.h> 13 13 #include <linux/rcupdate.h> 14 + #include <linux/spinlock_types.h> 14 15 #include <linux/wait.h> 15 16 16 17 #include "../../include/linux/raspberrypi/vchiq.h" ··· 348 347 struct mutex sync_mutex; 349 348 350 349 struct mutex bulk_transfer_mutex; 350 + 351 + spinlock_t msg_queue_spinlock; 352 + 353 + spinlock_t bulk_waiter_spinlock; 354 + 355 + spinlock_t quota_spinlock; 351 356 352 357 /* 353 358 * Indicates the byte position within the stream from where the next
+12 -12
drivers/staging/vc04_services/interface/vchiq_arm/vchiq_dev.c
··· 220 220 goto out; 221 221 } 222 222 223 - spin_lock(&msg_queue_spinlock); 223 + spin_lock(&service->state->msg_queue_spinlock); 224 224 if (user_service->msg_remove == user_service->msg_insert) { 225 225 if (!args->blocking) { 226 - spin_unlock(&msg_queue_spinlock); 226 + spin_unlock(&service->state->msg_queue_spinlock); 227 227 DEBUG_TRACE(DEQUEUE_MESSAGE_LINE); 228 228 ret = -EWOULDBLOCK; 229 229 goto out; ··· 231 231 user_service->dequeue_pending = 1; 232 232 ret = 0; 233 233 do { 234 - spin_unlock(&msg_queue_spinlock); 234 + spin_unlock(&service->state->msg_queue_spinlock); 235 235 DEBUG_TRACE(DEQUEUE_MESSAGE_LINE); 236 236 if (wait_for_completion_interruptible(&user_service->insert_event)) { 237 237 dev_dbg(service->state->dev, "arm: DEQUEUE_MESSAGE interrupted\n"); 238 238 ret = -EINTR; 239 239 break; 240 240 } 241 - spin_lock(&msg_queue_spinlock); 241 + spin_lock(&service->state->msg_queue_spinlock); 242 242 } while (user_service->msg_remove == user_service->msg_insert); 243 243 244 244 if (ret) ··· 247 247 248 248 if (WARN_ON_ONCE((int)(user_service->msg_insert - 249 249 user_service->msg_remove) < 0)) { 250 - spin_unlock(&msg_queue_spinlock); 250 + spin_unlock(&service->state->msg_queue_spinlock); 251 251 ret = -EINVAL; 252 252 goto out; 253 253 } ··· 255 255 header = user_service->msg_queue[user_service->msg_remove & 256 256 (MSG_QUEUE_SIZE - 1)]; 257 257 user_service->msg_remove++; 258 - spin_unlock(&msg_queue_spinlock); 258 + spin_unlock(&service->state->msg_queue_spinlock); 259 259 260 260 complete(&user_service->remove_event); 261 261 if (!header) { ··· 340 340 !waiter->bulk_waiter.bulk) { 341 341 if (waiter->bulk_waiter.bulk) { 342 342 /* Cancel the signal when the transfer completes. */ 343 - spin_lock(&bulk_waiter_spinlock); 343 + spin_lock(&service->state->bulk_waiter_spinlock); 344 344 waiter->bulk_waiter.bulk->userdata = NULL; 345 - spin_unlock(&bulk_waiter_spinlock); 345 + spin_unlock(&service->state->bulk_waiter_spinlock); 346 346 } 347 347 kfree(waiter); 348 348 ret = 0; ··· 1246 1246 break; 1247 1247 } 1248 1248 1249 - spin_lock(&msg_queue_spinlock); 1249 + spin_lock(&service->state->msg_queue_spinlock); 1250 1250 1251 1251 while (user_service->msg_remove != user_service->msg_insert) { 1252 1252 struct vchiq_header *header; ··· 1254 1254 1255 1255 header = user_service->msg_queue[m]; 1256 1256 user_service->msg_remove++; 1257 - spin_unlock(&msg_queue_spinlock); 1257 + spin_unlock(&service->state->msg_queue_spinlock); 1258 1258 1259 1259 if (header) 1260 1260 vchiq_release_message(instance, service->handle, header); 1261 - spin_lock(&msg_queue_spinlock); 1261 + spin_lock(&service->state->msg_queue_spinlock); 1262 1262 } 1263 1263 1264 - spin_unlock(&msg_queue_spinlock); 1264 + spin_unlock(&service->state->msg_queue_spinlock); 1265 1265 1266 1266 vchiq_service_put(service); 1267 1267 }