Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
// SPDX-License-Identifier: GPL-2.0-only
/*
 *
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 *   K. Y. Srinivasan <kys@microsoft.com>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/hyperv.h>
#include <linux/uio.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/prefetch.h>

#include "hyperv_vmbus.h"

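/*
 * Every packet written to the ring buffer is followed by an 8-byte
 * trailer: the prev_indices value appended in hv_ringbuffer_write().
 * The read iterator skips over it in __hv_pkt_iter_next().
 */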
#define VMBUS_PKT_TRAILER 8

/*
 * When we write to the ring buffer, check if the host needs to
 * be signaled. Here are the details of this protocol:
 *
 *	1. The host guarantees that while it is draining the
 *	   ring buffer, it will set the interrupt_mask to
 *	   indicate it does not need to be interrupted when
 *	   new data is placed.
 *
 *	2. The host guarantees that it will completely drain
 *	   the ring buffer before exiting the read loop. Further,
 *	   once the ring buffer is empty, it will clear the
 *	   interrupt_mask and re-check to see if new data has
 *	   arrived.
 *
 * KYS: Oct. 30, 2016:
 * It looks like Windows hosts have logic to deal with DOS attacks that
 * can be triggered if they receive interrupts when not expecting
 * them. The host expects interrupts only when the ring
 * transitions from empty to non-empty (or full to non-full on the guest
 * to host ring).
 * So, base the signaling decision solely on the ring state until the
 * host logic is fixed.
 */

static void hv_signal_on_write(u32 old_write, struct vmbus_channel *channel)
{
	struct hv_ring_buffer_info *rbi = &channel->outbound;

	virt_mb();
	if (READ_ONCE(rbi->ring_buffer->interrupt_mask))
		return;

	/* check interrupt_mask before read_index */
	virt_rmb();
	/*
	 * This is the only case we need to signal when the
	 * ring transitions from being empty to non-empty.
	 */
	if (old_write == READ_ONCE(rbi->ring_buffer->read_index)) {
		++channel->intr_out_empty;
		vmbus_setevent(channel);
	}
}

/* Get the next write location for the specified ring buffer. */
static inline u32
hv_get_next_write_location(struct hv_ring_buffer_info *ring_info)
{
	u32 next = ring_info->ring_buffer->write_index;

	return next;
}

/* Set the next write location for the specified ring buffer. */
static inline void
hv_set_next_write_location(struct hv_ring_buffer_info *ring_info,
			   u32 next_write_location)
{
	ring_info->ring_buffer->write_index = next_write_location;
}

/* Get the size of the ring buffer. */
static inline u32
hv_get_ring_buffersize(const struct hv_ring_buffer_info *ring_info)
{
	return ring_info->ring_datasize;
}

/*
 * Get the ring buffer "indices" as a u64: the write index of the specified
 * ring buffer in the upper 32 bits, with the lower 32 bits (nominally the
 * read index) left as zero. This value is appended after each packet as its
 * trailer.
 */
static inline u64
hv_get_ring_bufferindices(struct hv_ring_buffer_info *ring_info)
{
	return (u64)ring_info->ring_buffer->write_index << 32;
}

/*
 * Helper routine to copy from source to ring buffer.
 * Assumes there is enough room. Handles wrap-around in the destination
 * case only.
 */
static u32 hv_copyto_ringbuffer(
	struct hv_ring_buffer_info *ring_info,
	u32 start_write_offset,
	const void *src,
	u32 srclen)
{
	void *ring_buffer = hv_get_ring_buffer(ring_info);
	u32 ring_buffer_size = hv_get_ring_buffersize(ring_info);

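	/*
	 * The ring data area is double mapped (see hv_ringbuffer_init()), so
	 * a copy that runs past the end of the ring continues into the second
	 * mapping; only the returned offset needs to be wrapped.
	 */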
	memcpy(ring_buffer + start_write_offset, src, srclen);

	start_write_offset += srclen;
	if (start_write_offset >= ring_buffer_size)
		start_write_offset -= ring_buffer_size;

	return start_write_offset;
}

/*
 * hv_get_ringbuffer_availbytes()
 *
 * Get the number of bytes available to read from and to write to
 * the specified ring buffer.
 */
static void
hv_get_ringbuffer_availbytes(const struct hv_ring_buffer_info *rbi,
			     u32 *read, u32 *write)
{
	u32 read_loc, write_loc, dsize;

	/* Capture the read/write indices before they change */
	read_loc = READ_ONCE(rbi->ring_buffer->read_index);
	write_loc = READ_ONCE(rbi->ring_buffer->write_index);
	dsize = rbi->ring_datasize;

	*write = write_loc >= read_loc ? dsize - (write_loc - read_loc) :
		read_loc - write_loc;
	*read = dsize - *write;
}

/* Get various debug metrics for the specified ring buffer. */
int hv_ringbuffer_get_debuginfo(struct hv_ring_buffer_info *ring_info,
				struct hv_ring_buffer_debug_info *debug_info)
{
	u32 bytes_avail_towrite;
	u32 bytes_avail_toread;

	mutex_lock(&ring_info->ring_buffer_mutex);

	if (!ring_info->ring_buffer) {
		mutex_unlock(&ring_info->ring_buffer_mutex);
		return -EINVAL;
	}

	hv_get_ringbuffer_availbytes(ring_info,
				     &bytes_avail_toread,
				     &bytes_avail_towrite);
	debug_info->bytes_avail_toread = bytes_avail_toread;
	debug_info->bytes_avail_towrite = bytes_avail_towrite;
	debug_info->current_read_index = ring_info->ring_buffer->read_index;
	debug_info->current_write_index = ring_info->ring_buffer->write_index;
	debug_info->current_interrupt_mask
		= ring_info->ring_buffer->interrupt_mask;
	mutex_unlock(&ring_info->ring_buffer_mutex);

	return 0;
}
EXPORT_SYMBOL_GPL(hv_ringbuffer_get_debuginfo);

/* Initialize a channel's ring buffer info mutex locks */
void hv_ringbuffer_pre_init(struct vmbus_channel *channel)
{
	mutex_init(&channel->inbound.ring_buffer_mutex);
	mutex_init(&channel->outbound.ring_buffer_mutex);
}

/* Initialize the ring buffer. */
int hv_ringbuffer_init(struct hv_ring_buffer_info *ring_info,
		       struct page *pages, u32 page_cnt, u32 max_pkt_size)
{
	int i;
	struct page **pages_wraparound;

	BUILD_BUG_ON((sizeof(struct hv_ring_buffer) != PAGE_SIZE));

	/*
	 * First page holds struct hv_ring_buffer, do wraparound mapping for
	 * the rest.
	 */
	pages_wraparound = kcalloc(page_cnt * 2 - 1, sizeof(struct page *),
				   GFP_KERNEL);
	if (!pages_wraparound)
		return -ENOMEM;

	pages_wraparound[0] = pages;
	for (i = 0; i < 2 * (page_cnt - 1); i++)
		pages_wraparound[i + 1] = &pages[i % (page_cnt - 1) + 1];

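	/*
	 * Each data page appears twice in pages_wraparound, so the vmap()
	 * below yields a virtually contiguous double mapping of the data
	 * area: reads and writes that run past the end of the ring continue
	 * seamlessly into the second mapping.
	 */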
	ring_info->ring_buffer = (struct hv_ring_buffer *)
		vmap(pages_wraparound, page_cnt * 2 - 1, VM_MAP, PAGE_KERNEL);

	kfree(pages_wraparound);

	if (!ring_info->ring_buffer)
		return -ENOMEM;

	ring_info->ring_buffer->read_index =
		ring_info->ring_buffer->write_index = 0;

	/* Set the feature bit for enabling flow control. */
	ring_info->ring_buffer->feature_bits.value = 1;

	ring_info->ring_size = page_cnt << PAGE_SHIFT;
	ring_info->ring_size_div10_reciprocal =
		reciprocal_value(ring_info->ring_size / 10);
	ring_info->ring_datasize = ring_info->ring_size -
		sizeof(struct hv_ring_buffer);
	ring_info->priv_read_index = 0;

	/* Initialize buffer that holds copies of incoming packets */
	if (max_pkt_size) {
		ring_info->pkt_buffer = kzalloc(max_pkt_size, GFP_KERNEL);
		if (!ring_info->pkt_buffer)
			return -ENOMEM;
		ring_info->pkt_buffer_size = max_pkt_size;
	}

	spin_lock_init(&ring_info->ring_lock);

	return 0;
}


/* Cleanup the ring buffer. */
void hv_ringbuffer_cleanup(struct hv_ring_buffer_info *ring_info)
{
	mutex_lock(&ring_info->ring_buffer_mutex);
	vunmap(ring_info->ring_buffer);
	ring_info->ring_buffer = NULL;
	mutex_unlock(&ring_info->ring_buffer_mutex);

	kfree(ring_info->pkt_buffer);
	ring_info->pkt_buffer_size = 0;
}

/* Write to the ring buffer. */
int hv_ringbuffer_write(struct vmbus_channel *channel,
			const struct kvec *kv_list, u32 kv_count,
			u64 requestid)
{
	int i;
	u32 bytes_avail_towrite;
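	/* sizeof(u64) reserves room for the prev_indices trailer appended below */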
	u32 totalbytes_towrite = sizeof(u64);
	u32 next_write_location;
	u32 old_write;
	u64 prev_indices;
	unsigned long flags;
	struct hv_ring_buffer_info *outring_info = &channel->outbound;
	struct vmpacket_descriptor *desc = kv_list[0].iov_base;
	u64 rqst_id = VMBUS_NO_RQSTOR;

	if (channel->rescind)
		return -ENODEV;

	for (i = 0; i < kv_count; i++)
		totalbytes_towrite += kv_list[i].iov_len;

	spin_lock_irqsave(&outring_info->ring_lock, flags);

	bytes_avail_towrite = hv_get_bytes_to_write(outring_info);

	/*
	 * If there is only exactly enough room for the packet, treat the
	 * ring as full. Otherwise, after the write the read index would
	 * equal the write index and the next pass would wrongly see the
	 * ring buffer as empty.
	 */
	if (bytes_avail_towrite <= totalbytes_towrite) {
		++channel->out_full_total;

		if (!channel->out_full_flag) {
			++channel->out_full_first;
			channel->out_full_flag = true;
		}

		spin_unlock_irqrestore(&outring_info->ring_lock, flags);
		return -EAGAIN;
	}

	channel->out_full_flag = false;

	/* Write to the ring buffer */
	next_write_location = hv_get_next_write_location(outring_info);

	old_write = next_write_location;

	for (i = 0; i < kv_count; i++) {
		next_write_location = hv_copyto_ringbuffer(outring_info,
							   next_write_location,
							   kv_list[i].iov_base,
							   kv_list[i].iov_len);
	}

	/*
	 * Allocate the request ID after the data has been copied into the
	 * ring buffer. Once this request ID is allocated, the completion
	 * path could find the data and free it.
	 */

	if (desc->flags == VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED) {
		if (channel->next_request_id_callback != NULL) {
			rqst_id = channel->next_request_id_callback(channel, requestid);
			if (rqst_id == VMBUS_RQST_ERROR) {
				spin_unlock_irqrestore(&outring_info->ring_lock, flags);
				return -EAGAIN;
			}
		}
	}
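
	/*
	 * Stamp the transaction ID into the copy of the descriptor that now
	 * lives in the ring buffer (at old_write), not into the caller's kvec.
	 */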
	desc = hv_get_ring_buffer(outring_info) + old_write;
	desc->trans_id = (rqst_id == VMBUS_NO_RQSTOR) ? requestid : rqst_id;

	/* Set previous packet start */
	prev_indices = hv_get_ring_bufferindices(outring_info);

	next_write_location = hv_copyto_ringbuffer(outring_info,
						   next_write_location,
						   &prev_indices,
						   sizeof(u64));

	/* Issue a full memory barrier before updating the write index */
	virt_mb();

	/* Now, update the write location */
	hv_set_next_write_location(outring_info, next_write_location);

	spin_unlock_irqrestore(&outring_info->ring_lock, flags);

	hv_signal_on_write(old_write, channel);

	if (channel->rescind) {
		if (rqst_id != VMBUS_NO_RQSTOR) {
			/* Reclaim request ID to avoid leak of IDs */
			if (channel->request_addr_callback != NULL)
				channel->request_addr_callback(channel, rqst_id);
		}
		return -ENODEV;
	}

	return 0;
}

int hv_ringbuffer_read(struct vmbus_channel *channel,
		       void *buffer, u32 buflen, u32 *buffer_actual_len,
		       u64 *requestid, bool raw)
{
	struct vmpacket_descriptor *desc;
	u32 packetlen, offset;

	if (unlikely(buflen == 0))
		return -EINVAL;

	*buffer_actual_len = 0;
	*requestid = 0;

	/* Make sure there is something to read */
	desc = hv_pkt_iter_first(channel);
	if (desc == NULL) {
		/*
		 * No error is returned even when there is no header;
		 * drivers are expected to check buffer_actual_len instead.
		 */
		return 0;
	}

	offset = raw ? 0 : (desc->offset8 << 3);
	packetlen = (desc->len8 << 3) - offset;
	*buffer_actual_len = packetlen;
	*requestid = desc->trans_id;

	if (unlikely(packetlen > buflen))
		return -ENOBUFS;

	/* since ring is double mapped, only one copy is necessary */
	memcpy(buffer, (const char *)desc + offset, packetlen);

	/* Advance ring index to next packet descriptor */
	__hv_pkt_iter_next(channel, desc, true);

	/* Notify host of update */
	hv_pkt_iter_close(channel);

	return 0;
}

/*
 * Determine number of bytes available in ring buffer after
 * the current iterator (priv_read_index) location.
 *
 * This is similar to hv_get_bytes_to_read but with private
 * read index instead.
 */
static u32 hv_pkt_iter_avail(const struct hv_ring_buffer_info *rbi)
{
	u32 priv_read_loc = rbi->priv_read_index;
	u32 write_loc = READ_ONCE(rbi->ring_buffer->write_index);

	if (write_loc >= priv_read_loc)
		return write_loc - priv_read_loc;
	else
		return (rbi->ring_datasize - priv_read_loc) + write_loc;
}

/*
 * Get first vmbus packet without copying it out of the ring buffer
 */
struct vmpacket_descriptor *hv_pkt_iter_first_raw(struct vmbus_channel *channel)
{
	struct hv_ring_buffer_info *rbi = &channel->inbound;

	hv_debug_delay_test(channel, MESSAGE_DELAY);

	if (hv_pkt_iter_avail(rbi) < sizeof(struct vmpacket_descriptor))
		return NULL;

	return (struct vmpacket_descriptor *)(hv_get_ring_buffer(rbi) + rbi->priv_read_index);
}
EXPORT_SYMBOL_GPL(hv_pkt_iter_first_raw);

/*
 * Get first vmbus packet from ring buffer after read_index
 *
 * If ring buffer is empty, returns NULL and no other action needed.
 */
struct vmpacket_descriptor *hv_pkt_iter_first(struct vmbus_channel *channel)
{
	struct hv_ring_buffer_info *rbi = &channel->inbound;
	struct vmpacket_descriptor *desc, *desc_copy;
	u32 bytes_avail, pkt_len, pkt_offset;

	desc = hv_pkt_iter_first_raw(channel);
	if (!desc)
		return NULL;

	bytes_avail = min(rbi->pkt_buffer_size, hv_pkt_iter_avail(rbi));

	/*
	 * Ensure the compiler does not use references to incoming Hyper-V values (which
	 * could change at any moment) when reading local variables later in the code
	 */
	pkt_len = READ_ONCE(desc->len8) << 3;
	pkt_offset = READ_ONCE(desc->offset8) << 3;

	/*
	 * If pkt_len is invalid, set it to the smaller of hv_pkt_iter_avail() and
	 * rbi->pkt_buffer_size
	 */
	if (pkt_len < sizeof(struct vmpacket_descriptor) || pkt_len > bytes_avail)
		pkt_len = bytes_avail;

	/*
	 * If pkt_offset is invalid, arbitrarily set it to
	 * the size of vmpacket_descriptor
	 */
	if (pkt_offset < sizeof(struct vmpacket_descriptor) || pkt_offset > pkt_len)
		pkt_offset = sizeof(struct vmpacket_descriptor);

	/* Copy the Hyper-V packet out of the ring buffer */
	desc_copy = (struct vmpacket_descriptor *)rbi->pkt_buffer;
	memcpy(desc_copy, desc, pkt_len);

	/*
	 * Hyper-V could still change len8 and offset8 after the earlier read.
	 * Ensure that desc_copy has legal values for len8 and offset8 that
	 * are consistent with the copy we just made
	 */
	desc_copy->len8 = pkt_len >> 3;
	desc_copy->offset8 = pkt_offset >> 3;

	return desc_copy;
}
EXPORT_SYMBOL_GPL(hv_pkt_iter_first);

/*
 * Get next vmbus packet from ring buffer.
 *
 * Advances the current location (priv_read_index) and checks for more
 * data. If the end of the ring buffer is reached, then return NULL.
 */
struct vmpacket_descriptor *
__hv_pkt_iter_next(struct vmbus_channel *channel,
		   const struct vmpacket_descriptor *desc,
		   bool copy)
{
	struct hv_ring_buffer_info *rbi = &channel->inbound;
	u32 packetlen = desc->len8 << 3;
	u32 dsize = rbi->ring_datasize;

	hv_debug_delay_test(channel, MESSAGE_DELAY);
	/* bump offset to next potential packet */
	rbi->priv_read_index += packetlen + VMBUS_PKT_TRAILER;
	if (rbi->priv_read_index >= dsize)
		rbi->priv_read_index -= dsize;

	/* more data? */
	return copy ? hv_pkt_iter_first(channel) : hv_pkt_iter_first_raw(channel);
}
EXPORT_SYMBOL_GPL(__hv_pkt_iter_next);

/* How many bytes were read in this iterator cycle */
static u32 hv_pkt_iter_bytes_read(const struct hv_ring_buffer_info *rbi,
				  u32 start_read_index)
{
	if (rbi->priv_read_index >= start_read_index)
		return rbi->priv_read_index - start_read_index;
	else
		return rbi->ring_datasize - start_read_index +
			rbi->priv_read_index;
}

/*
 * Update host ring buffer after iterating over packets. If the host has
 * stopped queuing new entries because it found the ring buffer full, and
 * sufficient space is being freed up, signal the host. But be careful to
 * only signal the host when necessary, both for performance reasons and
 * because Hyper-V protects itself by throttling guests that signal
 * inappropriately.
 *
 * Determining when to signal is tricky. There are three key data inputs
 * that must be handled in this order to avoid race conditions:
 *
 * 1. Update the read_index
 * 2. Read the pending_send_sz
 * 3. Read the current write_index
 *
 * The interrupt_mask is not used to determine when to signal. The
 * interrupt_mask is used only on the guest->host ring buffer when
 * sending requests to the host. The host does not use it on the host->
 * guest ring buffer to indicate whether it should be signaled.
 */
void hv_pkt_iter_close(struct vmbus_channel *channel)
{
	struct hv_ring_buffer_info *rbi = &channel->inbound;
	u32 curr_write_sz, pending_sz, bytes_read, start_read_index;

	/*
	 * Make sure all reads are done before we update the read index since
	 * the writer may start writing to the read area once the read index
	 * is updated.
	 */
	virt_rmb();
	start_read_index = rbi->ring_buffer->read_index;
	rbi->ring_buffer->read_index = rbi->priv_read_index;

	/*
	 * Older versions of Hyper-V (before WS2012 and Win8) do not
	 * implement pending_send_sz and simply poll if the host->guest
	 * ring buffer is full. No signaling is needed or expected.
	 */
	if (!rbi->ring_buffer->feature_bits.feat_pending_send_sz)
		return;

	/*
	 * Issue a full memory barrier before making the signaling decision.
	 * If reading pending_send_sz were to be reordered and happen
	 * before we commit the new read_index, a race could occur. If the
	 * host were to set the pending_send_sz after we have sampled
	 * pending_send_sz, and the ring buffer blocks before we commit the
	 * read index, we could miss sending the interrupt. Issue a full
	 * memory barrier to address this.
	 */
	virt_mb();

	/*
	 * If the pending_send_sz is zero, then the ring buffer is not
	 * blocked and there is no need to signal. This is by far the
	 * most common case, so exit quickly for best performance.
	 */
	pending_sz = READ_ONCE(rbi->ring_buffer->pending_send_sz);
	if (!pending_sz)
		return;

	/*
	 * Ensure the read of write_index in hv_get_bytes_to_write()
	 * happens after the read of pending_send_sz.
	 */
	virt_rmb();
	curr_write_sz = hv_get_bytes_to_write(rbi);
	bytes_read = hv_pkt_iter_bytes_read(rbi, start_read_index);

	/*
	 * We want to signal the host only if we're transitioning
	 * from a "not enough free space" state to an "enough free
	 * space" state. For example, it's possible that this function
	 * could run and free up enough space to signal the host, and then
	 * run again and free up additional space before the host has a
	 * chance to clear the pending_send_sz. The 2nd invocation would
	 * be a null transition from "enough free space" to "enough free
	 * space", which doesn't warrant a signal.
	 *
	 * Exactly filling the ring buffer is treated as "not enough
	 * space". The ring buffer always must have at least one byte
	 * empty so the empty and full conditions are distinguishable.
	 * hv_get_bytes_to_write() doesn't fully tell the truth in
	 * this regard.
	 *
	 * So first check if we were in the "enough free space" state
	 * before we began the iteration. If so, the host was not
	 * blocked, and there's no need to signal.
	 */
	if (curr_write_sz - bytes_read > pending_sz)
		return;

	/*
	 * Similarly, if the new state is "not enough space", then
	 * there's no need to signal.
	 */
	if (curr_write_sz <= pending_sz)
		return;

	++channel->intr_in_full;
	vmbus_setevent(channel);
}
EXPORT_SYMBOL_GPL(hv_pkt_iter_close);