Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel
os
linux
1/*
2 * Copyright (c) 2009, Microsoft Corporation.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
15 * Place - Suite 330, Boston, MA 02111-1307 USA.
16 *
17 * Authors:
18 * Haiyang Zhang <haiyangz@microsoft.com>
19 * Hank Janssen <hjanssen@microsoft.com>
20 */
21#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
22
23#include <linux/kernel.h>
24#include <linux/sched.h>
25#include <linux/wait.h>
26#include <linux/mm.h>
27#include <linux/delay.h>
28#include <linux/io.h>
29#include <linux/slab.h>
30
31#include "hyperv.h"
32#include "hyperv_net.h"
33
34
/* Globals */

/* Name under which this driver registers with the vmbus layer */
static const char *driver_name = "netvsc";

/*
 * Device-type GUID of the synthetic network device:
 * {F8615163-DF3E-46c5-913F-F2D2F965ED0E}
 * (first three fields stored little-endian, as the byte layout shows).
 */
static const struct hv_guid netvsc_device_type = {
	.data = {
		0x63, 0x51, 0x61, 0xF8, 0x3E, 0xDF, 0xc5, 0x46,
		0x91, 0x3F, 0xF2, 0xD2, 0xF9, 0x65, 0xED, 0x0E
	}
};
45
46
47static struct netvsc_device *alloc_net_device(struct hv_device *device)
48{
49 struct netvsc_device *net_device;
50
51 net_device = kzalloc(sizeof(struct netvsc_device), GFP_KERNEL);
52 if (!net_device)
53 return NULL;
54
55 /* Set to 2 to allow both inbound and outbound traffic */
56 atomic_cmpxchg(&net_device->refcnt, 0, 2);
57
58 net_device->dev = device;
59 device->ext = net_device;
60
61 return net_device;
62}
63
64static void free_net_device(struct netvsc_device *device)
65{
66 WARN_ON(atomic_read(&device->refcnt) != 0);
67 device->dev->ext = NULL;
68 kfree(device);
69}
70
71
72/* Get the net device object iff exists and its refcount > 1 */
73static struct netvsc_device *get_outbound_net_device(struct hv_device *device)
74{
75 struct netvsc_device *net_device;
76
77 net_device = device->ext;
78 if (net_device && atomic_read(&net_device->refcnt) > 1)
79 atomic_inc(&net_device->refcnt);
80 else
81 net_device = NULL;
82
83 return net_device;
84}
85
86/* Get the net device object iff exists and its refcount > 0 */
87static struct netvsc_device *get_inbound_net_device(struct hv_device *device)
88{
89 struct netvsc_device *net_device;
90
91 net_device = device->ext;
92 if (net_device && atomic_read(&net_device->refcnt))
93 atomic_inc(&net_device->refcnt);
94 else
95 net_device = NULL;
96
97 return net_device;
98}
99
100static void put_net_device(struct hv_device *device)
101{
102 struct netvsc_device *net_device;
103
104 net_device = device->ext;
105
106 atomic_dec(&net_device->refcnt);
107}
108
/*
 * Stop outbound traffic: spin until only the two base references
 * (inbound + outbound) remain, then drop the outbound one so that
 * get_outbound_net_device() starts returning NULL.
 * Returns the device extension, or NULL if none is attached.
 */
static struct netvsc_device *release_outbound_net_device(
		struct hv_device *device)
{
	struct netvsc_device *net_device;

	net_device = device->ext;
	if (net_device == NULL)
		return NULL;

	/* Busy wait until the ref drop to 2, then set it to 1 */
	while (atomic_cmpxchg(&net_device->refcnt, 2, 1) != 2)
		udelay(100);

	return net_device;
}
124
/*
 * Stop all traffic: spin until only the inbound base reference is
 * left, drop it to zero, and detach the extension from the hv_device
 * so both get_*_net_device() helpers fail from now on.
 * Returns the (now unreferenced) extension, or NULL if none.
 */
static struct netvsc_device *release_inbound_net_device(
		struct hv_device *device)
{
	struct netvsc_device *net_device;

	net_device = device->ext;
	if (net_device == NULL)
		return NULL;

	/* Busy wait until the ref drop to 1, then set it to 0 */
	while (atomic_cmpxchg(&net_device->refcnt, 1, 0) != 1)
		udelay(100);

	device->ext = NULL;
	return net_device;
}
141
/*
 * Undo netvsc_init_recv_buf(): revoke the receive buffer from the
 * host (if it was ever advertised), tear down its gpadl, then free
 * the buffer pages and the section table.
 *
 * Returns 0 on success, -1 if a host message failed — in which case
 * the remaining resources are deliberately leaked rather than freed
 * while the host may still reference them.
 */
static int netvsc_destroy_recv_buf(struct netvsc_device *net_device)
{
	struct nvsp_message *revoke_packet;
	int ret = 0;

	/*
	 * If we got a section count, it means we received a
	 * SendReceiveBufferComplete msg (ie sent
	 * NvspMessage1TypeSendReceiveBuffer msg) therefore, we need
	 * to send a revoke msg here
	 */
	if (net_device->recv_section_cnt) {
		/* Send the revoke receive buffer */
		revoke_packet = &net_device->revoke_packet;
		memset(revoke_packet, 0, sizeof(struct nvsp_message));

		revoke_packet->hdr.msg_type =
			NVSP_MSG1_TYPE_REVOKE_RECV_BUF;
		revoke_packet->msg.v1_msg.
			revoke_recv_buf.id = NETVSC_RECEIVE_BUFFER_ID;

		ret = vmbus_sendpacket(net_device->dev->channel,
				       revoke_packet,
				       sizeof(struct nvsp_message),
				       (unsigned long)revoke_packet,
				       VM_PKT_DATA_INBAND, 0);
		/*
		 * If we failed here, we might as well return and
		 * have a leak rather than continue and a bugchk
		 */
		if (ret != 0) {
			dev_err(&net_device->dev->device, "unable to send "
				"revoke receive buffer to netvsp");
			return -1;
		}
	}

	/* Teardown the gpadl on the vsp end */
	if (net_device->recv_buf_gpadl_handle) {
		ret = vmbus_teardown_gpadl(net_device->dev->channel,
					   net_device->recv_buf_gpadl_handle);

		/* If we failed here, we might as well return and have a leak
		 * rather than continue and a bugchk
		 */
		if (ret != 0) {
			dev_err(&net_device->dev->device,
				"unable to teardown receive buffer's gpadl");
			return -1;
		}
		net_device->recv_buf_gpadl_handle = 0;
	}

	if (net_device->recv_buf) {
		/* Free up the receive buffer */
		free_pages((unsigned long)net_device->recv_buf,
			   get_order(net_device->recv_buf_size));
		net_device->recv_buf = NULL;
	}

	if (net_device->recv_section) {
		net_device->recv_section_cnt = 0;
		kfree(net_device->recv_section);
		net_device->recv_section = NULL;
	}

	return ret;
}
210
211static int netvsc_init_recv_buf(struct hv_device *device)
212{
213 int ret = 0;
214 int t;
215 struct netvsc_device *net_device;
216 struct nvsp_message *init_packet;
217
218 net_device = get_outbound_net_device(device);
219 if (!net_device) {
220 dev_err(&device->device, "unable to get net device..."
221 "device being destroyed?");
222 return -1;
223 }
224
225 net_device->recv_buf =
226 (void *)__get_free_pages(GFP_KERNEL|__GFP_ZERO,
227 get_order(net_device->recv_buf_size));
228 if (!net_device->recv_buf) {
229 dev_err(&device->device, "unable to allocate receive "
230 "buffer of size %d", net_device->recv_buf_size);
231 ret = -1;
232 goto cleanup;
233 }
234
235 /*
236 * Establish the gpadl handle for this buffer on this
237 * channel. Note: This call uses the vmbus connection rather
238 * than the channel to establish the gpadl handle.
239 */
240 ret = vmbus_establish_gpadl(device->channel, net_device->recv_buf,
241 net_device->recv_buf_size,
242 &net_device->recv_buf_gpadl_handle);
243 if (ret != 0) {
244 dev_err(&device->device,
245 "unable to establish receive buffer's gpadl");
246 goto cleanup;
247 }
248
249
250 /* Notify the NetVsp of the gpadl handle */
251 init_packet = &net_device->channel_init_pkt;
252
253 memset(init_packet, 0, sizeof(struct nvsp_message));
254
255 init_packet->hdr.msg_type = NVSP_MSG1_TYPE_SEND_RECV_BUF;
256 init_packet->msg.v1_msg.send_recv_buf.
257 gpadl_handle = net_device->recv_buf_gpadl_handle;
258 init_packet->msg.v1_msg.
259 send_recv_buf.id = NETVSC_RECEIVE_BUFFER_ID;
260
261 /* Send the gpadl notification request */
262 ret = vmbus_sendpacket(device->channel, init_packet,
263 sizeof(struct nvsp_message),
264 (unsigned long)init_packet,
265 VM_PKT_DATA_INBAND,
266 VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
267 if (ret != 0) {
268 dev_err(&device->device,
269 "unable to send receive buffer's gpadl to netvsp");
270 goto cleanup;
271 }
272
273 t = wait_for_completion_timeout(&net_device->channel_init_wait, HZ);
274 BUG_ON(t == 0);
275
276
277 /* Check the response */
278 if (init_packet->msg.v1_msg.
279 send_recv_buf_complete.status != NVSP_STAT_SUCCESS) {
280 dev_err(&device->device, "Unable to complete receive buffer "
281 "initialzation with NetVsp - status %d",
282 init_packet->msg.v1_msg.
283 send_recv_buf_complete.status);
284 ret = -1;
285 goto cleanup;
286 }
287
288 /* Parse the response */
289
290 net_device->recv_section_cnt = init_packet->msg.
291 v1_msg.send_recv_buf_complete.num_sections;
292
293 net_device->recv_section = kmalloc(net_device->recv_section_cnt
294 * sizeof(struct nvsp_1_receive_buffer_section), GFP_KERNEL);
295 if (net_device->recv_section == NULL) {
296 ret = -1;
297 goto cleanup;
298 }
299
300 memcpy(net_device->recv_section,
301 init_packet->msg.v1_msg.
302 send_recv_buf_complete.sections,
303 net_device->recv_section_cnt *
304 sizeof(struct nvsp_1_receive_buffer_section));
305
306 /*
307 * For 1st release, there should only be 1 section that represents the
308 * entire receive buffer
309 */
310 if (net_device->recv_section_cnt != 1 ||
311 net_device->recv_section->offset != 0) {
312 ret = -1;
313 goto cleanup;
314 }
315
316 goto exit;
317
318cleanup:
319 netvsc_destroy_recv_buf(net_device);
320
321exit:
322 put_net_device(device);
323 return ret;
324}
325
/*
 * Undo netvsc_init_send_buf(): revoke the send buffer from the host
 * (if it was ever advertised), tear down its gpadl, then free the
 * buffer pages.
 *
 * Returns 0 on success, -1 if a host message failed — in which case
 * the remaining resources are deliberately leaked rather than freed
 * while the host may still reference them.
 */
static int netvsc_destroy_send_buf(struct netvsc_device *net_device)
{
	struct nvsp_message *revoke_packet;
	int ret = 0;

	/*
	 * If we got a section size, it means we received a
	 * SendSendBufferComplete msg (ie sent
	 * NvspMessage1TypeSendSendBuffer msg) therefore, we need
	 * to send a revoke msg here
	 */
	if (net_device->send_section_size) {
		/* Send the revoke send buffer */
		revoke_packet = &net_device->revoke_packet;
		memset(revoke_packet, 0, sizeof(struct nvsp_message));

		revoke_packet->hdr.msg_type =
			NVSP_MSG1_TYPE_REVOKE_SEND_BUF;
		revoke_packet->msg.v1_msg.
			revoke_send_buf.id = NETVSC_SEND_BUFFER_ID;

		ret = vmbus_sendpacket(net_device->dev->channel,
				       revoke_packet,
				       sizeof(struct nvsp_message),
				       (unsigned long)revoke_packet,
				       VM_PKT_DATA_INBAND, 0);
		/*
		 * If we failed here, we might as well return and have a leak
		 * rather than continue and a bugchk
		 */
		if (ret != 0) {
			dev_err(&net_device->dev->device, "unable to send "
				"revoke send buffer to netvsp");
			return -1;
		}
	}

	/* Teardown the gpadl on the vsp end */
	if (net_device->send_buf_gpadl_handle) {
		ret = vmbus_teardown_gpadl(net_device->dev->channel,
					   net_device->send_buf_gpadl_handle);

		/*
		 * If we failed here, we might as well return and have a leak
		 * rather than continue and a bugchk
		 */
		if (ret != 0) {
			dev_err(&net_device->dev->device,
				"unable to teardown send buffer's gpadl");
			return -1;
		}
		net_device->send_buf_gpadl_handle = 0;
	}

	if (net_device->send_buf) {
		/* Free up the send buffer */
		free_pages((unsigned long)net_device->send_buf,
			   get_order(net_device->send_buf_size));
		net_device->send_buf = NULL;
	}

	return ret;
}
389
390static int netvsc_init_send_buf(struct hv_device *device)
391{
392 int ret = 0;
393 int t;
394 struct netvsc_device *net_device;
395 struct nvsp_message *init_packet;
396
397 net_device = get_outbound_net_device(device);
398 if (!net_device) {
399 dev_err(&device->device, "unable to get net device..."
400 "device being destroyed?");
401 return -1;
402 }
403 if (net_device->send_buf_size <= 0) {
404 ret = -EINVAL;
405 goto cleanup;
406 }
407
408 net_device->send_buf =
409 (void *)__get_free_pages(GFP_KERNEL|__GFP_ZERO,
410 get_order(net_device->send_buf_size));
411 if (!net_device->send_buf) {
412 dev_err(&device->device, "unable to allocate send "
413 "buffer of size %d", net_device->send_buf_size);
414 ret = -1;
415 goto cleanup;
416 }
417
418 /*
419 * Establish the gpadl handle for this buffer on this
420 * channel. Note: This call uses the vmbus connection rather
421 * than the channel to establish the gpadl handle.
422 */
423 ret = vmbus_establish_gpadl(device->channel, net_device->send_buf,
424 net_device->send_buf_size,
425 &net_device->send_buf_gpadl_handle);
426 if (ret != 0) {
427 dev_err(&device->device, "unable to establish send buffer's gpadl");
428 goto cleanup;
429 }
430
431 /* Notify the NetVsp of the gpadl handle */
432 init_packet = &net_device->channel_init_pkt;
433
434 memset(init_packet, 0, sizeof(struct nvsp_message));
435
436 init_packet->hdr.msg_type = NVSP_MSG1_TYPE_SEND_SEND_BUF;
437 init_packet->msg.v1_msg.send_recv_buf.
438 gpadl_handle = net_device->send_buf_gpadl_handle;
439 init_packet->msg.v1_msg.send_recv_buf.id =
440 NETVSC_SEND_BUFFER_ID;
441
442 /* Send the gpadl notification request */
443 ret = vmbus_sendpacket(device->channel, init_packet,
444 sizeof(struct nvsp_message),
445 (unsigned long)init_packet,
446 VM_PKT_DATA_INBAND,
447 VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
448 if (ret != 0) {
449 dev_err(&device->device,
450 "unable to send receive buffer's gpadl to netvsp");
451 goto cleanup;
452 }
453
454 t = wait_for_completion_timeout(&net_device->channel_init_wait, HZ);
455
456 BUG_ON(t == 0);
457
458 /* Check the response */
459 if (init_packet->msg.v1_msg.
460 send_send_buf_complete.status != NVSP_STAT_SUCCESS) {
461 dev_err(&device->device, "Unable to complete send buffer "
462 "initialzation with NetVsp - status %d",
463 init_packet->msg.v1_msg.
464 send_send_buf_complete.status);
465 ret = -1;
466 goto cleanup;
467 }
468
469 net_device->send_section_size = init_packet->
470 msg.v1_msg.send_send_buf_complete.section_size;
471
472 goto exit;
473
474cleanup:
475 netvsc_destroy_send_buf(net_device);
476
477exit:
478 put_net_device(device);
479 return ret;
480}
481
482
/*
 * netvsc_connect_vsp - Perform the NVSP handshake with the host.
 *
 * Negotiates the protocol version, reports the NDIS version, then
 * posts the receive and send buffers to the host.
 * Returns 0 on success, negative on failure.
 */
static int netvsc_connect_vsp(struct hv_device *device)
{
	int ret, t;
	struct netvsc_device *net_device;
	struct nvsp_message *init_packet;
	int ndis_version;

	net_device = get_outbound_net_device(device);
	if (!net_device) {
		dev_err(&device->device, "unable to get net device..."
			"device being destroyed?");
		return -1;
	}

	init_packet = &net_device->channel_init_pkt;

	memset(init_packet, 0, sizeof(struct nvsp_message));
	init_packet->hdr.msg_type = NVSP_MSG_TYPE_INIT;
	init_packet->msg.init_msg.init.min_protocol_ver =
		NVSP_MIN_PROTOCOL_VERSION;
	init_packet->msg.init_msg.init.max_protocol_ver =
		NVSP_MAX_PROTOCOL_VERSION;

	/* Send the init request */
	ret = vmbus_sendpacket(device->channel, init_packet,
			       sizeof(struct nvsp_message),
			       (unsigned long)init_packet,
			       VM_PKT_DATA_INBAND,
			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);

	if (ret != 0)
		goto cleanup;

	/* netvsc_send_completion() signals this when the reply arrives */
	t = wait_for_completion_timeout(&net_device->channel_init_wait, HZ);

	if (t == 0) {
		ret = -ETIMEDOUT;
		goto cleanup;
	}

	if (init_packet->msg.init_msg.init_complete.status !=
	    NVSP_STAT_SUCCESS) {
		ret = -1;
		goto cleanup;
	}

	/* Only protocol version 1 is supported by this driver */
	if (init_packet->msg.init_msg.init_complete.
	    negotiated_protocol_ver != NVSP_PROTOCOL_VERSION_1) {
		ret = -1;
		goto cleanup;
	}
	/* Send the ndis version */
	memset(init_packet, 0, sizeof(struct nvsp_message));

	/* NDIS 5.0, encoded as (major << 16) | minor */
	ndis_version = 0x00050000;

	init_packet->hdr.msg_type = NVSP_MSG1_TYPE_SEND_NDIS_VER;
	init_packet->msg.v1_msg.
		send_ndis_ver.ndis_major_ver =
			(ndis_version & 0xFFFF0000) >> 16;
	init_packet->msg.v1_msg.
		send_ndis_ver.ndis_minor_ver =
			ndis_version & 0xFFFF;

	/* Send the init request; no completion requested for this one */
	ret = vmbus_sendpacket(device->channel, init_packet,
			       sizeof(struct nvsp_message),
			       (unsigned long)init_packet,
			       VM_PKT_DATA_INBAND, 0);
	if (ret != 0) {
		ret = -1;
		goto cleanup;
	}

	/* Post the big receive buffer to NetVSP */
	ret = netvsc_init_recv_buf(device);
	if (ret == 0)
		ret = netvsc_init_send_buf(device);

cleanup:
	put_net_device(device);
	return ret;
}
566
/* Release both host-visible buffers set up by netvsc_connect_vsp(). */
static void netvsc_disconnect_vsp(struct netvsc_device *net_device)
{
	netvsc_destroy_recv_buf(net_device);
	netvsc_destroy_send_buf(net_device);
}
572
/*
 * netvsc_device_remove - Callback when the root bus device is removed
 *
 * Quiesces outbound traffic, waits for every pending send to
 * complete, disconnects the NVSP buffers, quiesces inbound traffic,
 * then closes the channel and frees the receive-packet freelist and
 * the device extension.
 */
int netvsc_device_remove(struct hv_device *device)
{
	struct netvsc_device *net_device;
	struct hv_netvsc_packet *netvsc_packet, *pos;

	/* Stop outbound traffic ie sends and receives completions */
	net_device = release_outbound_net_device(device);
	if (!net_device) {
		dev_err(&device->device, "No net device present!!");
		return -1;
	}

	/* Wait for all send completions */
	while (atomic_read(&net_device->num_outstanding_sends)) {
		dev_err(&device->device,
			"waiting for %d requests to complete...",
			atomic_read(&net_device->num_outstanding_sends));
		udelay(100);
	}

	netvsc_disconnect_vsp(net_device);

	/* Stop inbound traffic ie receives and sends completions */
	net_device = release_inbound_net_device(device);

	/* At this point, no one should be accessing netDevice except in here */
	dev_notice(&device->device, "net device safe to remove");

	/* Now, we can close the channel safely */
	vmbus_close(device->channel);

	/* Release all resources */
	list_for_each_entry_safe(netvsc_packet, pos,
				 &net_device->recv_pkt_list, list_ent) {
		list_del(&netvsc_packet->list_ent);
		kfree(netvsc_packet);
	}

	free_net_device(net_device);
	return 0;
}
617
/*
 * netvsc_send_completion - Handle a VM_PKT_COMP packet from the host.
 *
 * Channel-init replies (INIT / SEND_RECV_BUF / SEND_SEND_BUF
 * completes) are copied into channel_init_pkt and the waiter woken;
 * RNDIS send completes invoke the sender's completion callback and
 * decrement num_outstanding_sends.
 */
static void netvsc_send_completion(struct hv_device *device,
				   struct vmpacket_descriptor *packet)
{
	struct netvsc_device *net_device;
	struct nvsp_message *nvsp_packet;
	struct hv_netvsc_packet *nvsc_packet;

	net_device = get_inbound_net_device(device);
	if (!net_device) {
		dev_err(&device->device, "unable to get net device..."
			"device being destroyed?");
		return;
	}

	/* The NVSP payload starts offset8 * 8 bytes into the descriptor */
	nvsp_packet = (struct nvsp_message *)((unsigned long)packet +
			(packet->offset8 << 3));

	if ((nvsp_packet->hdr.msg_type == NVSP_MSG_TYPE_INIT_COMPLETE) ||
	    (nvsp_packet->hdr.msg_type ==
	     NVSP_MSG1_TYPE_SEND_RECV_BUF_COMPLETE) ||
	    (nvsp_packet->hdr.msg_type ==
	     NVSP_MSG1_TYPE_SEND_SEND_BUF_COMPLETE)) {
		/* Copy the response back */
		memcpy(&net_device->channel_init_pkt, nvsp_packet,
		       sizeof(struct nvsp_message));
		complete(&net_device->channel_init_wait);
	} else if (nvsp_packet->hdr.msg_type ==
		   NVSP_MSG1_TYPE_SEND_RNDIS_PKT_COMPLETE) {
		/* Get the send context (stashed in trans_id by netvsc_send) */
		nvsc_packet = (struct hv_netvsc_packet *)(unsigned long)
			packet->trans_id;

		/* Notify the layer above us */
		nvsc_packet->completion.send.send_completion(
			nvsc_packet->completion.send.send_completion_ctx);

		atomic_dec(&net_device->num_outstanding_sends);
	} else {
		dev_err(&device->device, "Unknown send completion packet type- "
			"%d received!!", nvsp_packet->hdr.msg_type);
	}

	put_net_device(device);
}
662
663int netvsc_send(struct hv_device *device,
664 struct hv_netvsc_packet *packet)
665{
666 struct netvsc_device *net_device;
667 int ret = 0;
668
669 struct nvsp_message sendMessage;
670
671 net_device = get_outbound_net_device(device);
672 if (!net_device) {
673 dev_err(&device->device, "net device (%p) shutting down..."
674 "ignoring outbound packets", net_device);
675 return -2;
676 }
677
678 sendMessage.hdr.msg_type = NVSP_MSG1_TYPE_SEND_RNDIS_PKT;
679 if (packet->is_data_pkt) {
680 /* 0 is RMC_DATA; */
681 sendMessage.msg.v1_msg.send_rndis_pkt.channel_type = 0;
682 } else {
683 /* 1 is RMC_CONTROL; */
684 sendMessage.msg.v1_msg.send_rndis_pkt.channel_type = 1;
685 }
686
687 /* Not using send buffer section */
688 sendMessage.msg.v1_msg.send_rndis_pkt.send_buf_section_index =
689 0xFFFFFFFF;
690 sendMessage.msg.v1_msg.send_rndis_pkt.send_buf_section_size = 0;
691
692 if (packet->page_buf_cnt) {
693 ret = vmbus_sendpacket_pagebuffer(device->channel,
694 packet->page_buf,
695 packet->page_buf_cnt,
696 &sendMessage,
697 sizeof(struct nvsp_message),
698 (unsigned long)packet);
699 } else {
700 ret = vmbus_sendpacket(device->channel, &sendMessage,
701 sizeof(struct nvsp_message),
702 (unsigned long)packet,
703 VM_PKT_DATA_INBAND,
704 VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
705
706 }
707
708 if (ret != 0)
709 dev_err(&device->device, "Unable to send packet %p ret %d",
710 packet, ret);
711
712 atomic_inc(&net_device->num_outstanding_sends);
713 put_net_device(device);
714 return ret;
715}
716
717static void netvsc_send_recv_completion(struct hv_device *device,
718 u64 transaction_id)
719{
720 struct nvsp_message recvcompMessage;
721 int retries = 0;
722 int ret;
723
724 recvcompMessage.hdr.msg_type =
725 NVSP_MSG1_TYPE_SEND_RNDIS_PKT_COMPLETE;
726
727 /* FIXME: Pass in the status */
728 recvcompMessage.msg.v1_msg.send_rndis_pkt_complete.status =
729 NVSP_STAT_SUCCESS;
730
731retry_send_cmplt:
732 /* Send the completion */
733 ret = vmbus_sendpacket(device->channel, &recvcompMessage,
734 sizeof(struct nvsp_message), transaction_id,
735 VM_PKT_COMP, 0);
736 if (ret == 0) {
737 /* success */
738 /* no-op */
739 } else if (ret == -1) {
740 /* no more room...wait a bit and attempt to retry 3 times */
741 retries++;
742 dev_err(&device->device, "unable to send receive completion pkt"
743 " (tid %llx)...retrying %d", transaction_id, retries);
744
745 if (retries < 4) {
746 udelay(100);
747 goto retry_send_cmplt;
748 } else {
749 dev_err(&device->device, "unable to send receive "
750 "completion pkt (tid %llx)...give up retrying",
751 transaction_id);
752 }
753 } else {
754 dev_err(&device->device, "unable to send receive "
755 "completion pkt - %llx", transaction_id);
756 }
757}
758
/*
 * Send a receive completion packet to RNDIS device (ie NetVsp).
 *
 * Called once per consumed range of a transfer-page packet; returns
 * the per-range packet (and, for the last range, the xfer page packet
 * itself) to the freelist, and only then acks the host.
 */
static void netvsc_receive_completion(void *context)
{
	struct hv_netvsc_packet *packet = context;
	struct hv_device *device = (struct hv_device *)packet->device;
	struct netvsc_device *net_device;
	u64 transaction_id = 0;
	bool fsend_receive_comp = false;
	unsigned long flags;

	/*
	 * Even though it seems logical to do a GetOutboundNetDevice() here to
	 * send out receive completion, we are using GetInboundNetDevice()
	 * since we may have disable outbound traffic already.
	 */
	net_device = get_inbound_net_device(device);
	if (!net_device) {
		dev_err(&device->device, "unable to get net device..."
			"device being destroyed?");
		return;
	}

	/* Overloading use of the lock. */
	spin_lock_irqsave(&net_device->recv_pkt_list_lock, flags);

	/* One sub-range of the xfer page packet has been consumed */
	packet->xfer_page_pkt->count--;

	/*
	 * Last one in the line that represent 1 xfer page packet.
	 * Return the xfer page packet itself to the freelist
	 */
	if (packet->xfer_page_pkt->count == 0) {
		fsend_receive_comp = true;
		transaction_id = packet->completion.recv.recv_completion_tid;
		list_add_tail(&packet->xfer_page_pkt->list_ent,
			      &net_device->recv_pkt_list);

	}

	/* Put the packet back */
	list_add_tail(&packet->list_ent, &net_device->recv_pkt_list);
	spin_unlock_irqrestore(&net_device->recv_pkt_list_lock, flags);

	/* Send a receive completion for the xfer page packet */
	if (fsend_receive_comp)
		netvsc_send_recv_completion(device, transaction_id);

	put_net_device(device);
}
808
/*
 * netvsc_receive - Handle a VM_PKT_DATA_USING_XFER_PAGES packet.
 *
 * Validates the transfer-page packet, grabs one freelist packet per
 * range plus one for the xfer page packet itself, translates each
 * range (an RNDIS packet inside the receive buffer) into page-buffer
 * form, and hands it to the RNDIS filter.  Each range is acked back
 * via netvsc_receive_completion().
 */
static void netvsc_receive(struct hv_device *device,
			   struct vmpacket_descriptor *packet)
{
	struct netvsc_device *net_device;
	struct vmtransfer_page_packet_header *vmxferpage_packet;
	struct nvsp_message *nvsp_packet;
	struct hv_netvsc_packet *netvsc_packet = NULL;
	unsigned long start;
	unsigned long end, end_virtual;
	/* struct netvsc_driver *netvscDriver; */
	struct xferpage_packet *xferpage_packet = NULL;
	int i, j;
	int count = 0, bytes_remain = 0;
	unsigned long flags;

	LIST_HEAD(listHead);

	net_device = get_inbound_net_device(device);
	if (!net_device) {
		dev_err(&device->device, "unable to get net device..."
			"device being destroyed?");
		return;
	}

	/*
	 * All inbound packets other than send completion should be xfer page
	 * packet
	 */
	if (packet->type != VM_PKT_DATA_USING_XFER_PAGES) {
		dev_err(&device->device, "Unknown packet type received - %d",
			packet->type);
		put_net_device(device);
		return;
	}

	/* The NVSP payload starts offset8 * 8 bytes into the descriptor */
	nvsp_packet = (struct nvsp_message *)((unsigned long)packet +
			(packet->offset8 << 3));

	/* Make sure this is a valid nvsp packet */
	if (nvsp_packet->hdr.msg_type !=
	    NVSP_MSG1_TYPE_SEND_RNDIS_PKT) {
		dev_err(&device->device, "Unknown nvsp packet type received-"
			" %d", nvsp_packet->hdr.msg_type);
		put_net_device(device);
		return;
	}

	vmxferpage_packet = (struct vmtransfer_page_packet_header *)packet;

	if (vmxferpage_packet->xfer_pageset_id != NETVSC_RECEIVE_BUFFER_ID) {
		dev_err(&device->device, "Invalid xfer page set id - "
			"expecting %x got %x", NETVSC_RECEIVE_BUFFER_ID,
			vmxferpage_packet->xfer_pageset_id);
		put_net_device(device);
		return;
	}

	/*
	 * Grab free packets (range count + 1) to represent this xfer
	 * page packet. +1 to represent the xfer page packet itself.
	 * We grab it here so that we know exactly how many we can
	 * fulfil
	 */
	spin_lock_irqsave(&net_device->recv_pkt_list_lock, flags);
	while (!list_empty(&net_device->recv_pkt_list)) {
		list_move_tail(net_device->recv_pkt_list.next, &listHead);
		if (++count == vmxferpage_packet->range_cnt + 1)
			break;
	}
	spin_unlock_irqrestore(&net_device->recv_pkt_list_lock, flags);

	/*
	 * We need at least 2 netvsc pkts (1 to represent the xfer
	 * page and at least 1 for the range) i.e. we can handled
	 * some of the xfer page packet ranges...
	 */
	if (count < 2) {
		dev_err(&device->device, "Got only %d netvsc pkt...needed "
			"%d pkts. Dropping this xfer page packet completely!",
			count, vmxferpage_packet->range_cnt + 1);

		/* Return it to the freelist */
		spin_lock_irqsave(&net_device->recv_pkt_list_lock, flags);
		for (i = count; i != 0; i--) {
			list_move_tail(listHead.next,
				       &net_device->recv_pkt_list);
		}
		spin_unlock_irqrestore(&net_device->recv_pkt_list_lock,
				       flags);

		netvsc_send_recv_completion(device,
					    vmxferpage_packet->d.trans_id);

		put_net_device(device);
		return;
	}

	/* Remove the 1st packet to represent the xfer page packet itself */
	xferpage_packet = (struct xferpage_packet *)listHead.next;
	list_del(&xferpage_packet->list_ent);

	/* This is how much we can satisfy */
	xferpage_packet->count = count - 1;

	if (xferpage_packet->count != vmxferpage_packet->range_cnt) {
		dev_err(&device->device, "Needed %d netvsc pkts to satisy "
			"this xfer page...got %d",
			vmxferpage_packet->range_cnt, xferpage_packet->count);
	}

	/* Each range represents 1 RNDIS pkt that contains 1 ethernet frame */
	for (i = 0; i < (count - 1); i++) {
		netvsc_packet = (struct hv_netvsc_packet *)listHead.next;
		list_del(&netvsc_packet->list_ent);

		/* Initialize the netvsc packet */
		netvsc_packet->xfer_page_pkt = xferpage_packet;
		netvsc_packet->completion.recv.recv_completion =
			netvsc_receive_completion;
		netvsc_packet->completion.recv.recv_completion_ctx =
			netvsc_packet;
		netvsc_packet->device = device;
		/* Save this so that we can send it back */
		netvsc_packet->completion.recv.recv_completion_tid =
			vmxferpage_packet->d.trans_id;

		netvsc_packet->total_data_buflen =
			vmxferpage_packet->ranges[i].byte_count;
		netvsc_packet->page_buf_cnt = 1;

		netvsc_packet->page_buf[0].len =
			vmxferpage_packet->ranges[i].byte_count;

		/* Physical address of the range inside the receive buffer */
		start = virt_to_phys((void *)((unsigned long)net_device->
			recv_buf + vmxferpage_packet->ranges[i].byte_offset));

		netvsc_packet->page_buf[0].pfn = start >> PAGE_SHIFT;
		end_virtual = (unsigned long)net_device->recv_buf
			+ vmxferpage_packet->ranges[i].byte_offset
			+ vmxferpage_packet->ranges[i].byte_count - 1;
		end = virt_to_phys((void *)end_virtual);

		/* Calculate the page relative offset */
		netvsc_packet->page_buf[0].offset =
			vmxferpage_packet->ranges[i].byte_offset &
			(PAGE_SIZE - 1);
		if ((end >> PAGE_SHIFT) != (start >> PAGE_SHIFT)) {
			/* Handle frame across multiple pages: split the
			 * range into per-page page_buf entries.
			 */
			netvsc_packet->page_buf[0].len =
				(netvsc_packet->page_buf[0].pfn <<
				 PAGE_SHIFT)
				+ PAGE_SIZE - start;
			bytes_remain = netvsc_packet->total_data_buflen -
				netvsc_packet->page_buf[0].len;
			for (j = 1; j < NETVSC_PACKET_MAXPAGE; j++) {
				netvsc_packet->page_buf[j].offset = 0;
				if (bytes_remain <= PAGE_SIZE) {
					netvsc_packet->page_buf[j].len =
						bytes_remain;
					bytes_remain = 0;
				} else {
					netvsc_packet->page_buf[j].len =
						PAGE_SIZE;
					bytes_remain -= PAGE_SIZE;
				}
				netvsc_packet->page_buf[j].pfn =
					virt_to_phys((void *)(end_virtual -
						bytes_remain)) >> PAGE_SHIFT;
				netvsc_packet->page_buf_cnt++;
				if (bytes_remain == 0)
					break;
			}
		}

		/* Pass it to the upper layer */
		rndis_filter_receive(device, netvsc_packet);

		/* Ack this range (may recycle the packet to the freelist) */
		netvsc_receive_completion(netvsc_packet->
			completion.recv.recv_completion_ctx);
	}

	put_net_device(device);
}
992
993static void netvsc_channel_cb(void *context)
994{
995 int ret;
996 struct hv_device *device = context;
997 struct netvsc_device *net_device;
998 u32 bytes_recvd;
999 u64 request_id;
1000 unsigned char *packet;
1001 struct vmpacket_descriptor *desc;
1002 unsigned char *buffer;
1003 int bufferlen = NETVSC_PACKET_SIZE;
1004
1005 packet = kzalloc(NETVSC_PACKET_SIZE * sizeof(unsigned char),
1006 GFP_ATOMIC);
1007 if (!packet)
1008 return;
1009 buffer = packet;
1010
1011 net_device = get_inbound_net_device(device);
1012 if (!net_device) {
1013 dev_err(&device->device, "net device (%p) shutting down..."
1014 "ignoring inbound packets", net_device);
1015 goto out;
1016 }
1017
1018 do {
1019 ret = vmbus_recvpacket_raw(device->channel, buffer, bufferlen,
1020 &bytes_recvd, &request_id);
1021 if (ret == 0) {
1022 if (bytes_recvd > 0) {
1023 desc = (struct vmpacket_descriptor *)buffer;
1024 switch (desc->type) {
1025 case VM_PKT_COMP:
1026 netvsc_send_completion(device, desc);
1027 break;
1028
1029 case VM_PKT_DATA_USING_XFER_PAGES:
1030 netvsc_receive(device, desc);
1031 break;
1032
1033 default:
1034 dev_err(&device->device,
1035 "unhandled packet type %d, "
1036 "tid %llx len %d\n",
1037 desc->type, request_id,
1038 bytes_recvd);
1039 break;
1040 }
1041
1042 /* reset */
1043 if (bufferlen > NETVSC_PACKET_SIZE) {
1044 kfree(buffer);
1045 buffer = packet;
1046 bufferlen = NETVSC_PACKET_SIZE;
1047 }
1048 } else {
1049 /* reset */
1050 if (bufferlen > NETVSC_PACKET_SIZE) {
1051 kfree(buffer);
1052 buffer = packet;
1053 bufferlen = NETVSC_PACKET_SIZE;
1054 }
1055
1056 break;
1057 }
1058 } else if (ret == -2) {
1059 /* Handle large packet */
1060 buffer = kmalloc(bytes_recvd, GFP_ATOMIC);
1061 if (buffer == NULL) {
1062 /* Try again next time around */
1063 dev_err(&device->device,
1064 "unable to allocate buffer of size "
1065 "(%d)!!", bytes_recvd);
1066 break;
1067 }
1068
1069 bufferlen = bytes_recvd;
1070 }
1071 } while (1);
1072
1073 put_net_device(device);
1074out:
1075 kfree(buffer);
1076 return;
1077}
1078
/*
 * netvsc_device_add - Callback when the device belonging to this
 * driver is added
 *
 * Allocates the per-channel state, prefills the receive-packet
 * freelist, opens the vmbus channel, and performs the NVSP handshake.
 * On any failure, the partially constructed state is torn down again.
 */
int netvsc_device_add(struct hv_device *device, void *additional_info)
{
	int ret = 0;
	int i;
	/* Ring size (in pages) requested by the caller */
	int ring_size =
		((struct netvsc_device_info *)additional_info)->ring_size;
	struct netvsc_device *net_device;
	struct hv_netvsc_packet *packet, *pos;

	net_device = alloc_net_device(device);
	if (!net_device) {
		ret = -1;
		goto cleanup;
	}

	/* Initialize the NetVSC channel extension */
	net_device->recv_buf_size = NETVSC_RECEIVE_BUFFER_SIZE;
	spin_lock_init(&net_device->recv_pkt_list_lock);

	net_device->send_buf_size = NETVSC_SEND_BUFFER_SIZE;

	INIT_LIST_HEAD(&net_device->recv_pkt_list);

	/*
	 * Prefill the freelist used to service xfer-page receives; a
	 * partial fill (allocation failure mid-loop) is tolerated.
	 */
	for (i = 0; i < NETVSC_RECEIVE_PACKETLIST_COUNT; i++) {
		packet = kzalloc(sizeof(struct hv_netvsc_packet) +
				 (NETVSC_RECEIVE_SG_COUNT *
				  sizeof(struct hv_page_buffer)), GFP_KERNEL);
		if (!packet)
			break;

		list_add_tail(&packet->list_ent,
			      &net_device->recv_pkt_list);
	}
	init_completion(&net_device->channel_init_wait);

	/* Open the channel */
	ret = vmbus_open(device->channel, ring_size * PAGE_SIZE,
			 ring_size * PAGE_SIZE, NULL, 0,
			 netvsc_channel_cb, device);

	if (ret != 0) {
		dev_err(&device->device, "unable to open channel: %d", ret);
		ret = -1;
		goto cleanup;
	}

	/* Channel is opened */
	pr_info("hv_netvsc channel opened successfully");

	/* Connect with the NetVsp */
	ret = netvsc_connect_vsp(device);
	if (ret != 0) {
		dev_err(&device->device,
			"unable to connect to NetVSP - %d", ret);
		ret = -1;
		goto close;
	}

	return ret;

close:
	/* Now, we can close the channel safely */
	vmbus_close(device->channel);

cleanup:

	if (net_device) {
		/* Drain and free the receive-packet freelist */
		list_for_each_entry_safe(packet, pos,
					 &net_device->recv_pkt_list,
					 list_ent) {
			list_del(&packet->list_ent);
			kfree(packet);
		}

		/* Drop both base references so free_net_device() is legal */
		release_outbound_net_device(device);
		release_inbound_net_device(device);

		free_net_device(net_device);
	}

	return ret;
}
1165
/*
 * netvsc_initialize - Main entry point
 *
 * Fill in @drv with this driver's name and the device-type GUID it
 * binds to.  Always returns 0.
 */
int netvsc_initialize(struct hv_driver *drv)
{

	drv->name = driver_name;
	memcpy(&drv->dev_type, &netvsc_device_type, sizeof(struct hv_guid));

	return 0;
}