Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Drivers: hv: vmbus: Give control over how the ring access is serialized

On the channel send side, many of the VMBUS
device drivers explicitly serialize access to the
outgoing ring buffer. Give more control to the
VMBUS device drivers in terms of how to serialize
access to the outgoing ring buffer.
The default behavior will be to acquire the
ring lock to preserve the current behavior.

Signed-off-by: K. Y. Srinivasan <kys@microsoft.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

authored by

K. Y. Srinivasan and committed by
Greg Kroah-Hartman
fe760e4d 3eba9a77

+37 -10
+11 -4
drivers/hv/channel.c
··· 639 639 u64 aligned_data = 0; 640 640 int ret; 641 641 bool signal = false; 642 + bool lock = channel->acquire_ring_lock; 642 643 int num_vecs = ((bufferlen != 0) ? 3 : 1); 643 644 644 645 ··· 659 658 bufferlist[2].iov_len = (packetlen_aligned - packetlen); 660 659 661 660 ret = hv_ringbuffer_write(&channel->outbound, bufferlist, num_vecs, 662 - &signal); 661 + &signal, lock); 663 662 664 663 /* 665 664 * Signalling the host is conditional on many factors: ··· 739 738 struct kvec bufferlist[3]; 740 739 u64 aligned_data = 0; 741 740 bool signal = false; 741 + bool lock = channel->acquire_ring_lock; 742 742 743 743 if (pagecount > MAX_PAGE_BUFFER_COUNT) 744 744 return -EINVAL; ··· 776 774 bufferlist[2].iov_base = &aligned_data; 777 775 bufferlist[2].iov_len = (packetlen_aligned - packetlen); 778 776 779 - ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3, &signal); 777 + ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3, 778 + &signal, lock); 780 779 781 780 /* 782 781 * Signalling the host is conditional on many factors: ··· 840 837 struct kvec bufferlist[3]; 841 838 u64 aligned_data = 0; 842 839 bool signal = false; 840 + bool lock = channel->acquire_ring_lock; 843 841 844 842 packetlen = desc_size + bufferlen; 845 843 packetlen_aligned = ALIGN(packetlen, sizeof(u64)); ··· 860 856 bufferlist[2].iov_base = &aligned_data; 861 857 bufferlist[2].iov_len = (packetlen_aligned - packetlen); 862 858 863 - ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3, &signal); 859 + ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3, 860 + &signal, lock); 864 861 865 862 if (ret == 0 && signal) 866 863 vmbus_setevent(channel); ··· 886 881 struct kvec bufferlist[3]; 887 882 u64 aligned_data = 0; 888 883 bool signal = false; 884 + bool lock = channel->acquire_ring_lock; 889 885 u32 pfncount = NUM_PAGES_SPANNED(multi_pagebuffer->offset, 890 886 multi_pagebuffer->len); 891 887 ··· 925 919 bufferlist[2].iov_base = &aligned_data; 926 920 
bufferlist[2].iov_len = (packetlen_aligned - packetlen); 927 921 928 - ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3, &signal); 922 + ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3, 923 + &signal, lock); 929 924 930 925 if (ret == 0 && signal) 931 926 vmbus_setevent(channel);
+1
drivers/hv/channel_mgmt.c
··· 259 259 return NULL; 260 260 261 261 channel->id = atomic_inc_return(&chan_num); 262 + channel->acquire_ring_lock = true; 262 263 spin_lock_init(&channel->inbound_lock); 263 264 spin_lock_init(&channel->lock); 264 265
+1 -1
drivers/hv/hyperv_vmbus.h
··· 529 529 530 530 int hv_ringbuffer_write(struct hv_ring_buffer_info *ring_info, 531 531 struct kvec *kv_list, 532 - u32 kv_count, bool *signal); 532 + u32 kv_count, bool *signal, bool lock); 533 533 534 534 int hv_ringbuffer_read(struct hv_ring_buffer_info *inring_info, 535 535 void *buffer, u32 buflen, u32 *buffer_actual_len,
+8 -5
drivers/hv/ring_buffer.c
··· 314 314 315 315 /* Write to the ring buffer. */ 316 316 int hv_ringbuffer_write(struct hv_ring_buffer_info *outring_info, 317 - struct kvec *kv_list, u32 kv_count, bool *signal) 317 + struct kvec *kv_list, u32 kv_count, bool *signal, bool lock) 318 318 { 319 319 int i = 0; 320 320 u32 bytes_avail_towrite; ··· 324 324 u32 next_write_location; 325 325 u32 old_write; 326 326 u64 prev_indices = 0; 327 - unsigned long flags; 327 + unsigned long flags = 0; 328 328 329 329 for (i = 0; i < kv_count; i++) 330 330 totalbytes_towrite += kv_list[i].iov_len; 331 331 332 332 totalbytes_towrite += sizeof(u64); 333 333 334 - spin_lock_irqsave(&outring_info->ring_lock, flags); 334 + if (lock) 335 + spin_lock_irqsave(&outring_info->ring_lock, flags); 335 336 336 337 hv_get_ringbuffer_availbytes(outring_info, 337 338 &bytes_avail_toread, ··· 344 343 * is empty since the read index == write index. 345 344 */ 346 345 if (bytes_avail_towrite <= totalbytes_towrite) { 347 - spin_unlock_irqrestore(&outring_info->ring_lock, flags); 346 + if (lock) 347 + spin_unlock_irqrestore(&outring_info->ring_lock, flags); 348 348 return -EAGAIN; 349 349 } 350 350 ··· 376 374 hv_set_next_write_location(outring_info, next_write_location); 377 375 378 376 379 - spin_unlock_irqrestore(&outring_info->ring_lock, flags); 377 + if (lock) 378 + spin_unlock_irqrestore(&outring_info->ring_lock, flags); 380 379 381 380 *signal = hv_need_to_signal(old_write, outring_info); 382 381 return 0;
+16
include/linux/hyperv.h
··· 811 811 * signaling control. 812 812 */ 813 813 enum hv_signal_policy signal_policy; 814 + /* 815 + * On the channel send side, many of the VMBUS 816 + * device drivers explicity serialize access to the 817 + * outgoing ring buffer. Give more control to the 818 + * VMBUS device drivers in terms how to serialize 819 + * accesss to the outgoing ring buffer. 820 + * The default behavior will be to aquire the 821 + * ring lock to preserve the current behavior. 822 + */ 823 + bool acquire_ring_lock; 824 + 814 825 }; 826 + 827 + static inline void set_channel_lock_state(struct vmbus_channel *c, bool state) 828 + { 829 + c->acquire_ring_lock = state; 830 + } 815 831 816 832 static inline bool is_hvsock_channel(const struct vmbus_channel *c) 817 833 {