/*
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, see <http://www.gnu.org/licenses/>.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/netdevice.h>
#include <linux/if_ether.h>
#include <linux/vmalloc.h>
#include <asm/sync_bitops.h>

#include "hyperv_net.h"
/*
 * Switch the data path from the synthetic interface to the VF
 * interface.
 */
void netvsc_switch_datapath(struct netvsc_device *nv_dev, bool vf)
{
	struct nvsp_message *init_pkt = &nv_dev->channel_init_pkt;
	struct hv_device *dev = nv_dev->dev;

	memset(init_pkt, 0, sizeof(struct nvsp_message));
	init_pkt->hdr.msg_type = NVSP_MSG4_TYPE_SWITCH_DATA_PATH;
	if (vf)
		init_pkt->msg.v4_msg.active_dp.active_datapath =
			NVSP_DATAPATH_VF;
	else
		init_pkt->msg.v4_msg.active_dp.active_datapath =
			NVSP_DATAPATH_SYNTHETIC;

	vmbus_sendpacket(dev->channel, init_pkt,
			 sizeof(struct nvsp_message),
			 (unsigned long)init_pkt,
			 VM_PKT_DATA_INBAND, 0);
}
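/* Allocate the per-device netvsc state plus the buffer used by the
 * channel callback, and install it as the hv_device driver data.
 */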
static struct netvsc_device *alloc_net_device(struct hv_device *device)
{
	struct netvsc_device *net_device;
	struct net_device *ndev = hv_get_drvdata(device);

	net_device = kzalloc(sizeof(struct netvsc_device), GFP_KERNEL);
	if (!net_device)
		return NULL;

	net_device->cb_buffer = kzalloc(NETVSC_PACKET_SIZE, GFP_KERNEL);
	if (!net_device->cb_buffer) {
		kfree(net_device);
		return NULL;
	}

	init_waitqueue_head(&net_device->wait_drain);
	net_device->destroy = false;
	atomic_set(&net_device->open_cnt, 0);
	atomic_set(&net_device->vf_use_cnt, 0);
	net_device->dev = device;
	net_device->ndev = ndev;
	net_device->max_pkt = RNDIS_MAX_PKT_DEFAULT;
	net_device->pkt_align = RNDIS_PKT_ALIGN_DEFAULT;

	net_device->vf_netdev = NULL;
	net_device->vf_inject = false;

	hv_set_drvdata(device, net_device);
	return net_device;
}
static void free_netvsc_device(struct netvsc_device *nvdev)
{
	kfree(nvdev->cb_buffer);
	kfree(nvdev);
}

static struct netvsc_device *get_outbound_net_device(struct hv_device *device)
{
	struct netvsc_device *net_device = hv_get_drvdata(device);

	if (net_device && net_device->destroy)
		net_device = NULL;

	return net_device;
}

static struct netvsc_device *get_inbound_net_device(struct hv_device *device)
{
	struct netvsc_device *net_device = hv_get_drvdata(device);

	if (net_device && net_device->destroy &&
	    atomic_read(&net_device->num_outstanding_sends) == 0)
		net_device = NULL;

	return net_device;
}
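/* Revoke the receive and send buffers from the host, tear down their
 * GPADL handles and release the local allocations. Each step is guarded,
 * so this is safe even if only part of netvsc_init_buf() completed.
 */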
static int netvsc_destroy_buf(struct netvsc_device *net_device)
{
	struct nvsp_message *revoke_packet;
	int ret = 0;
	struct net_device *ndev = net_device->ndev;

	/*
	 * If we got a section count, it means we received a
	 * SendReceiveBufferComplete msg (ie sent
	 * NvspMessage1TypeSendReceiveBuffer msg) therefore, we need
	 * to send a revoke msg here
	 */
	if (net_device->recv_section_cnt) {
		/* Send the revoke receive buffer */
		revoke_packet = &net_device->revoke_packet;
		memset(revoke_packet, 0, sizeof(struct nvsp_message));

		revoke_packet->hdr.msg_type =
			NVSP_MSG1_TYPE_REVOKE_RECV_BUF;
		revoke_packet->msg.v1_msg.
		revoke_recv_buf.id = NETVSC_RECEIVE_BUFFER_ID;

		ret = vmbus_sendpacket(net_device->dev->channel,
				       revoke_packet,
				       sizeof(struct nvsp_message),
				       (unsigned long)revoke_packet,
				       VM_PKT_DATA_INBAND, 0);
		/*
		 * If we failed here, we might as well return and
		 * have a leak rather than continue and a bugchk
		 */
		if (ret != 0) {
			netdev_err(ndev, "unable to send "
				   "revoke receive buffer to netvsp\n");
			return ret;
		}
	}

	/* Teardown the gpadl on the vsp end */
	if (net_device->recv_buf_gpadl_handle) {
		ret = vmbus_teardown_gpadl(net_device->dev->channel,
					   net_device->recv_buf_gpadl_handle);

		/* If we failed here, we might as well return and have a leak
		 * rather than continue and a bugchk
		 */
		if (ret != 0) {
			netdev_err(ndev,
				   "unable to teardown receive buffer's gpadl\n");
			return ret;
		}
		net_device->recv_buf_gpadl_handle = 0;
	}

	if (net_device->recv_buf) {
		/* Free up the receive buffer */
		vfree(net_device->recv_buf);
		net_device->recv_buf = NULL;
	}

	if (net_device->recv_section) {
		net_device->recv_section_cnt = 0;
		kfree(net_device->recv_section);
		net_device->recv_section = NULL;
	}

	/* Deal with the send buffer we may have setup.
	 * If we got a send section size, it means we received a
	 * NVSP_MSG1_TYPE_SEND_SEND_BUF_COMPLETE msg (ie sent
	 * NVSP_MSG1_TYPE_SEND_SEND_BUF msg) therefore, we need
	 * to send a revoke msg here
	 */
	if (net_device->send_section_size) {
		/* Send the revoke send buffer */
		revoke_packet = &net_device->revoke_packet;
		memset(revoke_packet, 0, sizeof(struct nvsp_message));

		revoke_packet->hdr.msg_type =
			NVSP_MSG1_TYPE_REVOKE_SEND_BUF;
		revoke_packet->msg.v1_msg.revoke_send_buf.id =
			NETVSC_SEND_BUFFER_ID;

		ret = vmbus_sendpacket(net_device->dev->channel,
				       revoke_packet,
				       sizeof(struct nvsp_message),
				       (unsigned long)revoke_packet,
				       VM_PKT_DATA_INBAND, 0);
		/* If we failed here, we might as well return and
		 * have a leak rather than continue and a bugchk
		 */
		if (ret != 0) {
			netdev_err(ndev, "unable to send "
				   "revoke send buffer to netvsp\n");
			return ret;
		}
	}

	/* Teardown the gpadl on the vsp end */
	if (net_device->send_buf_gpadl_handle) {
		ret = vmbus_teardown_gpadl(net_device->dev->channel,
					   net_device->send_buf_gpadl_handle);

		/* If we failed here, we might as well return and have a leak
		 * rather than continue and a bugchk
		 */
		if (ret != 0) {
			netdev_err(ndev,
				   "unable to teardown send buffer's gpadl\n");
			return ret;
		}
		net_device->send_buf_gpadl_handle = 0;
	}

	if (net_device->send_buf) {
		/* Free up the send buffer */
		vfree(net_device->send_buf);
		net_device->send_buf = NULL;
	}

	kfree(net_device->send_section_map);

	return ret;
}
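/* Allocate the receive and send buffers, establish a GPADL handle for
 * each with the host, and hand the handles to the NetVSP so it can post
 * incoming data and accept copy-batched sends through them.
 */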
static int netvsc_init_buf(struct hv_device *device)
{
	int ret = 0;
	unsigned long t;
	struct netvsc_device *net_device;
	struct nvsp_message *init_packet;
	struct net_device *ndev;
	int node;

	net_device = get_outbound_net_device(device);
	if (!net_device)
		return -ENODEV;
	ndev = net_device->ndev;

	node = cpu_to_node(device->channel->target_cpu);
	net_device->recv_buf = vzalloc_node(net_device->recv_buf_size, node);
	if (!net_device->recv_buf)
		net_device->recv_buf = vzalloc(net_device->recv_buf_size);

	if (!net_device->recv_buf) {
		netdev_err(ndev, "unable to allocate receive "
			   "buffer of size %d\n", net_device->recv_buf_size);
		ret = -ENOMEM;
		goto cleanup;
	}

	/*
	 * Establish the gpadl handle for this buffer on this
	 * channel.  Note: This call uses the vmbus connection rather
	 * than the channel to establish the gpadl handle.
	 */
	ret = vmbus_establish_gpadl(device->channel, net_device->recv_buf,
				    net_device->recv_buf_size,
				    &net_device->recv_buf_gpadl_handle);
	if (ret != 0) {
		netdev_err(ndev,
			   "unable to establish receive buffer's gpadl\n");
		goto cleanup;
	}

	/* Notify the NetVsp of the gpadl handle */
	init_packet = &net_device->channel_init_pkt;

	memset(init_packet, 0, sizeof(struct nvsp_message));

	init_packet->hdr.msg_type = NVSP_MSG1_TYPE_SEND_RECV_BUF;
	init_packet->msg.v1_msg.send_recv_buf.
		gpadl_handle = net_device->recv_buf_gpadl_handle;
	init_packet->msg.v1_msg.
		send_recv_buf.id = NETVSC_RECEIVE_BUFFER_ID;

	/* Send the gpadl notification request */
	ret = vmbus_sendpacket(device->channel, init_packet,
			       sizeof(struct nvsp_message),
			       (unsigned long)init_packet,
			       VM_PKT_DATA_INBAND,
			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
	if (ret != 0) {
		netdev_err(ndev,
			   "unable to send receive buffer's gpadl to netvsp\n");
		goto cleanup;
	}

	t = wait_for_completion_timeout(&net_device->channel_init_wait, 5*HZ);
	BUG_ON(t == 0);

	/* Check the response */
	if (init_packet->msg.v1_msg.
	    send_recv_buf_complete.status != NVSP_STAT_SUCCESS) {
		netdev_err(ndev, "Unable to complete receive buffer "
			   "initialization with NetVsp - status %d\n",
			   init_packet->msg.v1_msg.
			   send_recv_buf_complete.status);
		ret = -EINVAL;
		goto cleanup;
	}

	/* Parse the response */
	net_device->recv_section_cnt = init_packet->msg.
		v1_msg.send_recv_buf_complete.num_sections;

	net_device->recv_section = kmemdup(
		init_packet->msg.v1_msg.send_recv_buf_complete.sections,
		net_device->recv_section_cnt *
		sizeof(struct nvsp_1_receive_buffer_section),
		GFP_KERNEL);
	if (net_device->recv_section == NULL) {
		ret = -EINVAL;
		goto cleanup;
	}

	/*
	 * For 1st release, there should only be 1 section that represents the
	 * entire receive buffer
	 */
	if (net_device->recv_section_cnt != 1 ||
	    net_device->recv_section->offset != 0) {
		ret = -EINVAL;
		goto cleanup;
	}

	/* Now setup the send buffer.
	 */
	net_device->send_buf = vzalloc_node(net_device->send_buf_size, node);
	if (!net_device->send_buf)
		net_device->send_buf = vzalloc(net_device->send_buf_size);
	if (!net_device->send_buf) {
		netdev_err(ndev, "unable to allocate send "
			   "buffer of size %d\n", net_device->send_buf_size);
		ret = -ENOMEM;
		goto cleanup;
	}

	/* Establish the gpadl handle for this buffer on this
	 * channel.  Note: This call uses the vmbus connection rather
	 * than the channel to establish the gpadl handle.
	 */
	ret = vmbus_establish_gpadl(device->channel, net_device->send_buf,
				    net_device->send_buf_size,
				    &net_device->send_buf_gpadl_handle);
	if (ret != 0) {
		netdev_err(ndev,
			   "unable to establish send buffer's gpadl\n");
		goto cleanup;
	}

	/* Notify the NetVsp of the gpadl handle */
	init_packet = &net_device->channel_init_pkt;
	memset(init_packet, 0, sizeof(struct nvsp_message));
	init_packet->hdr.msg_type = NVSP_MSG1_TYPE_SEND_SEND_BUF;
	init_packet->msg.v1_msg.send_send_buf.gpadl_handle =
		net_device->send_buf_gpadl_handle;
	init_packet->msg.v1_msg.send_send_buf.id = NETVSC_SEND_BUFFER_ID;

	/* Send the gpadl notification request */
	ret = vmbus_sendpacket(device->channel, init_packet,
			       sizeof(struct nvsp_message),
			       (unsigned long)init_packet,
			       VM_PKT_DATA_INBAND,
			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
	if (ret != 0) {
		netdev_err(ndev,
			   "unable to send send buffer's gpadl to netvsp\n");
		goto cleanup;
	}

	t = wait_for_completion_timeout(&net_device->channel_init_wait, 5*HZ);
	BUG_ON(t == 0);

	/* Check the response */
	if (init_packet->msg.v1_msg.
	    send_send_buf_complete.status != NVSP_STAT_SUCCESS) {
		netdev_err(ndev, "Unable to complete send buffer "
			   "initialization with NetVsp - status %d\n",
			   init_packet->msg.v1_msg.
			   send_send_buf_complete.status);
		ret = -EINVAL;
		goto cleanup;
	}

	/* Parse the response */
	net_device->send_section_size = init_packet->msg.
		v1_msg.send_send_buf_complete.section_size;

	/* Section count is simply the size divided by the section size.
	 */
	net_device->send_section_cnt =
		net_device->send_buf_size / net_device->send_section_size;

	dev_info(&device->device, "Send section size: %d, Section count:%d\n",
		 net_device->send_section_size, net_device->send_section_cnt);

	/* Setup state for managing the send buffer. */
	net_device->map_words = DIV_ROUND_UP(net_device->send_section_cnt,
					     BITS_PER_LONG);

	net_device->send_section_map =
		kzalloc(net_device->map_words * sizeof(ulong), GFP_KERNEL);
	if (net_device->send_section_map == NULL) {
		ret = -ENOMEM;
		goto cleanup;
	}

	goto exit;

cleanup:
	netvsc_destroy_buf(net_device);

exit:
	return ret;
}
/* Negotiate NVSP protocol version */
static int negotiate_nvsp_ver(struct hv_device *device,
			      struct netvsc_device *net_device,
			      struct nvsp_message *init_packet,
			      u32 nvsp_ver)
{
	int ret;
	unsigned long t;

	memset(init_packet, 0, sizeof(struct nvsp_message));
	init_packet->hdr.msg_type = NVSP_MSG_TYPE_INIT;
	init_packet->msg.init_msg.init.min_protocol_ver = nvsp_ver;
	init_packet->msg.init_msg.init.max_protocol_ver = nvsp_ver;

	/* Send the init request */
	ret = vmbus_sendpacket(device->channel, init_packet,
			       sizeof(struct nvsp_message),
			       (unsigned long)init_packet,
			       VM_PKT_DATA_INBAND,
			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
	if (ret != 0)
		return ret;

	t = wait_for_completion_timeout(&net_device->channel_init_wait, 5*HZ);
	if (t == 0)
		return -ETIMEDOUT;

	if (init_packet->msg.init_msg.init_complete.status !=
	    NVSP_STAT_SUCCESS)
		return -EINVAL;

	if (nvsp_ver == NVSP_PROTOCOL_VERSION_1)
		return 0;

	/* NVSPv2 or later: Send NDIS config */
	memset(init_packet, 0, sizeof(struct nvsp_message));
	init_packet->hdr.msg_type = NVSP_MSG2_TYPE_SEND_NDIS_CONFIG;
	init_packet->msg.v2_msg.send_ndis_config.mtu = net_device->ndev->mtu +
							ETH_HLEN;
	init_packet->msg.v2_msg.send_ndis_config.capability.ieee8021q = 1;

	if (nvsp_ver >= NVSP_PROTOCOL_VERSION_5)
		init_packet->msg.v2_msg.send_ndis_config.capability.sriov = 1;

	ret = vmbus_sendpacket(device->channel, init_packet,
			       sizeof(struct nvsp_message),
			       (unsigned long)init_packet,
			       VM_PKT_DATA_INBAND, 0);

	return ret;
}
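/* Negotiate the highest NVSP protocol version supported by both ends,
 * report the NDIS version to the host, and set up the receive and send
 * buffers.
 */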
static int netvsc_connect_vsp(struct hv_device *device)
{
	int ret;
	struct netvsc_device *net_device;
	struct nvsp_message *init_packet;
	int ndis_version;
	struct net_device *ndev;
	u32 ver_list[] = { NVSP_PROTOCOL_VERSION_1, NVSP_PROTOCOL_VERSION_2,
		NVSP_PROTOCOL_VERSION_4, NVSP_PROTOCOL_VERSION_5 };
	int i, num_ver = 4; /* number of different NVSP versions */

	net_device = get_outbound_net_device(device);
	if (!net_device)
		return -ENODEV;
	ndev = net_device->ndev;

	init_packet = &net_device->channel_init_pkt;

	/* Negotiate the latest NVSP protocol supported */
	for (i = num_ver - 1; i >= 0; i--)
		if (negotiate_nvsp_ver(device, net_device, init_packet,
				       ver_list[i]) == 0) {
			net_device->nvsp_version = ver_list[i];
			break;
		}

	if (i < 0) {
		ret = -EPROTO;
		goto cleanup;
	}

	pr_debug("Negotiated NVSP version:%x\n", net_device->nvsp_version);

	/* Send the ndis version */
	memset(init_packet, 0, sizeof(struct nvsp_message));

	if (net_device->nvsp_version <= NVSP_PROTOCOL_VERSION_4)
		ndis_version = 0x00060001;
	else
		ndis_version = 0x0006001e;

	init_packet->hdr.msg_type = NVSP_MSG1_TYPE_SEND_NDIS_VER;
	init_packet->msg.v1_msg.
		send_ndis_ver.ndis_major_ver =
			(ndis_version & 0xFFFF0000) >> 16;
	init_packet->msg.v1_msg.
		send_ndis_ver.ndis_minor_ver =
			ndis_version & 0xFFFF;

	/* Send the init request */
	ret = vmbus_sendpacket(device->channel, init_packet,
			       sizeof(struct nvsp_message),
			       (unsigned long)init_packet,
			       VM_PKT_DATA_INBAND, 0);
	if (ret != 0)
		goto cleanup;

	/* Post the big receive buffer to NetVSP */
	if (net_device->nvsp_version <= NVSP_PROTOCOL_VERSION_2)
		net_device->recv_buf_size = NETVSC_RECEIVE_BUFFER_SIZE_LEGACY;
	else
		net_device->recv_buf_size = NETVSC_RECEIVE_BUFFER_SIZE;
	net_device->send_buf_size = NETVSC_SEND_BUFFER_SIZE;

	ret = netvsc_init_buf(device);

cleanup:
	return ret;
}
static void netvsc_disconnect_vsp(struct netvsc_device *net_device)
{
	netvsc_destroy_buf(net_device);
}

/*
 * netvsc_device_remove - Callback when the root bus device is removed
 */
int netvsc_device_remove(struct hv_device *device)
{
	struct netvsc_device *net_device;
	unsigned long flags;

	net_device = hv_get_drvdata(device);

	netvsc_disconnect_vsp(net_device);

	/*
	 * Since we have already drained, we don't need to busy wait
	 * as was done in final_release_stor_device()
	 * Note that we cannot set the ext pointer to NULL until
	 * we have drained - to drain the outgoing packets, we need to
	 * allow incoming packets.
	 */
	spin_lock_irqsave(&device->channel->inbound_lock, flags);
	hv_set_drvdata(device, NULL);
	spin_unlock_irqrestore(&device->channel->inbound_lock, flags);

	/*
	 * At this point, no one should be accessing net_device
	 * except in here
	 */
	dev_notice(&device->device, "net device safe to remove\n");

	/* Now, we can close the channel safely */
	vmbus_close(device->channel);

	/* Release all resources */
	vfree(net_device->sub_cb_buf);
	free_netvsc_device(net_device);
	return 0;
}
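/* Ring-buffer watermarks (as a percentage of free space) used to stop
 * and wake the transmit queues.
 */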
#define RING_AVAIL_PERCENT_HIWATER 20
#define RING_AVAIL_PERCENT_LOWATER 10

/*
 * Get the percentage of available bytes to write in the ring.
 * The return value is in range from 0 to 100.
 */
static inline u32 hv_ringbuf_avail_percent(
		struct hv_ring_buffer_info *ring_info)
{
	u32 avail_read, avail_write;

	hv_get_ringbuffer_availbytes(ring_info, &avail_read, &avail_write);

	return avail_write * 100 / ring_info->ring_datasize;
}

static inline void netvsc_free_send_slot(struct netvsc_device *net_device,
					 u32 index)
{
	sync_change_bit(index, net_device->send_section_map);
}
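/* Handle a completion packet from the host: channel-init responses wake
 * the waiter in netvsc_init_buf()/negotiate_nvsp_ver(); RNDIS send
 * completions release the send-buffer section and skb and may wake a
 * stopped transmit queue.
 */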
static void netvsc_send_completion(struct netvsc_device *net_device,
				   struct vmbus_channel *incoming_channel,
				   struct hv_device *device,
				   struct vmpacket_descriptor *packet)
{
	struct nvsp_message *nvsp_packet;
	struct hv_netvsc_packet *nvsc_packet;
	struct net_device *ndev;
	u32 send_index;
	struct sk_buff *skb;

	ndev = net_device->ndev;

	nvsp_packet = (struct nvsp_message *)((unsigned long)packet +
					      (packet->offset8 << 3));

	if ((nvsp_packet->hdr.msg_type == NVSP_MSG_TYPE_INIT_COMPLETE) ||
	    (nvsp_packet->hdr.msg_type ==
	     NVSP_MSG1_TYPE_SEND_RECV_BUF_COMPLETE) ||
	    (nvsp_packet->hdr.msg_type ==
	     NVSP_MSG1_TYPE_SEND_SEND_BUF_COMPLETE) ||
	    (nvsp_packet->hdr.msg_type ==
	     NVSP_MSG5_TYPE_SUBCHANNEL)) {
		/* Copy the response back */
		memcpy(&net_device->channel_init_pkt, nvsp_packet,
		       sizeof(struct nvsp_message));
		complete(&net_device->channel_init_wait);
	} else if (nvsp_packet->hdr.msg_type ==
		   NVSP_MSG1_TYPE_SEND_RNDIS_PKT_COMPLETE) {
		int num_outstanding_sends;
		u16 q_idx = 0;
		struct vmbus_channel *channel = device->channel;
		int queue_sends;

		/* Get the send context */
		skb = (struct sk_buff *)(unsigned long)packet->trans_id;

		/* Notify the layer above us */
		if (skb) {
			nvsc_packet = (struct hv_netvsc_packet *)skb->cb;
			send_index = nvsc_packet->send_buf_index;
			if (send_index != NETVSC_INVALID_INDEX)
				netvsc_free_send_slot(net_device, send_index);
			q_idx = nvsc_packet->q_idx;
			channel = incoming_channel;
			dev_kfree_skb_any(skb);
		}

		num_outstanding_sends =
			atomic_dec_return(&net_device->num_outstanding_sends);
		queue_sends = atomic_dec_return(&net_device->
						queue_sends[q_idx]);

		if (net_device->destroy && num_outstanding_sends == 0)
			wake_up(&net_device->wait_drain);

		if (netif_tx_queue_stopped(netdev_get_tx_queue(ndev, q_idx)) &&
		    !net_device->nd_ctx->start_remove &&
		    (hv_ringbuf_avail_percent(&channel->outbound) >
		     RING_AVAIL_PERCENT_HIWATER || queue_sends < 1))
			netif_tx_wake_queue(netdev_get_tx_queue(
					    ndev, q_idx));
	} else {
		netdev_err(ndev, "Unknown send completion packet type- "
			   "%d received!!\n", nvsp_packet->hdr.msg_type);
	}
}
static u32 netvsc_get_next_send_section(struct netvsc_device *net_device)
{
	unsigned long index;
	u32 max_words = net_device->map_words;
	unsigned long *map_addr = (unsigned long *)net_device->send_section_map;
	u32 section_cnt = net_device->send_section_cnt;
	int ret_val = NETVSC_INVALID_INDEX;
	int i, prev_val;

	for (i = 0; i < max_words; i++) {
		if (!~(map_addr[i]))
			continue;
		index = ffz(map_addr[i]);
		prev_val = sync_test_and_set_bit(index, &map_addr[i]);
		if (prev_val)
			continue;
		if ((index + (i * BITS_PER_LONG)) >= section_cnt)
			break;
		ret_val = (index + (i * BITS_PER_LONG));
		break;
	}

	return ret_val;
}

static u32 netvsc_copy_to_send_buf(struct netvsc_device *net_device,
				   unsigned int section_index,
				   u32 pend_size,
				   struct hv_netvsc_packet *packet,
				   struct rndis_message *rndis_msg,
				   struct hv_page_buffer **pb,
				   struct sk_buff *skb)
{
	char *start = net_device->send_buf;
	char *dest = start + (section_index * net_device->send_section_size)
		     + pend_size;
	int i;
	bool is_data_pkt = (skb != NULL) ? true : false;
	bool xmit_more = (skb != NULL) ? skb->xmit_more : false;
	u32 msg_size = 0;
	u32 padding = 0;
	u32 remain = packet->total_data_buflen % net_device->pkt_align;
	u32 page_count = packet->cp_partial ? packet->rmsg_pgcnt :
		packet->page_buf_cnt;

	/* Add padding */
	if (is_data_pkt && xmit_more && remain &&
	    !packet->cp_partial) {
		padding = net_device->pkt_align - remain;
		rndis_msg->msg_len += padding;
		packet->total_data_buflen += padding;
	}

	for (i = 0; i < page_count; i++) {
		char *src = phys_to_virt((*pb)[i].pfn << PAGE_SHIFT);
		u32 offset = (*pb)[i].offset;
		u32 len = (*pb)[i].len;

		memcpy(dest, (src + offset), len);
		msg_size += len;
		dest += len;
	}

	if (padding) {
		memset(dest, 0, padding);
		msg_size += padding;
	}

	return msg_size;
}
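/* Transmit one packet on the VMBus channel for its queue, using page
 * buffers when present and inband data otherwise, then update the
 * outstanding-send counters and the transmit-queue state based on the
 * result and the remaining ring space.
 */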
static inline int netvsc_send_pkt(
	struct hv_netvsc_packet *packet,
	struct netvsc_device *net_device,
	struct hv_page_buffer **pb,
	struct sk_buff *skb)
{
	struct nvsp_message nvmsg;
	u16 q_idx = packet->q_idx;
	struct vmbus_channel *out_channel = net_device->chn_table[q_idx];
	struct net_device *ndev = net_device->ndev;
	u64 req_id;
	int ret;
	struct hv_page_buffer *pgbuf;
	u32 ring_avail = hv_ringbuf_avail_percent(&out_channel->outbound);
	bool xmit_more = (skb != NULL) ? skb->xmit_more : false;

	nvmsg.hdr.msg_type = NVSP_MSG1_TYPE_SEND_RNDIS_PKT;
	if (skb != NULL) {
		/* 0 is RMC_DATA; */
		nvmsg.msg.v1_msg.send_rndis_pkt.channel_type = 0;
	} else {
		/* 1 is RMC_CONTROL; */
		nvmsg.msg.v1_msg.send_rndis_pkt.channel_type = 1;
	}

	nvmsg.msg.v1_msg.send_rndis_pkt.send_buf_section_index =
		packet->send_buf_index;
	if (packet->send_buf_index == NETVSC_INVALID_INDEX)
		nvmsg.msg.v1_msg.send_rndis_pkt.send_buf_section_size = 0;
	else
		nvmsg.msg.v1_msg.send_rndis_pkt.send_buf_section_size =
			packet->total_data_buflen;

	req_id = (ulong)skb;

	if (out_channel->rescind)
		return -ENODEV;

	/*
	 * It is possible that once we successfully place this packet
	 * on the ringbuffer, we may stop the queue. In that case, we want
	 * to notify the host independent of the xmit_more flag. We don't
	 * need to be precise here; in the worst case we may signal the host
	 * unnecessarily.
	 */
	if (ring_avail < (RING_AVAIL_PERCENT_LOWATER + 1))
		xmit_more = false;

	if (packet->page_buf_cnt) {
		pgbuf = packet->cp_partial ? (*pb) +
			packet->rmsg_pgcnt : (*pb);
		ret = vmbus_sendpacket_pagebuffer_ctl(out_channel,
						      pgbuf,
						      packet->page_buf_cnt,
						      &nvmsg,
						      sizeof(struct nvsp_message),
						      req_id,
						      VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED,
						      !xmit_more);
	} else {
		ret = vmbus_sendpacket_ctl(out_channel, &nvmsg,
					   sizeof(struct nvsp_message),
					   req_id,
					   VM_PKT_DATA_INBAND,
					   VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED,
					   !xmit_more);
	}

	if (ret == 0) {
		atomic_inc(&net_device->num_outstanding_sends);
		atomic_inc(&net_device->queue_sends[q_idx]);

		if (ring_avail < RING_AVAIL_PERCENT_LOWATER) {
			netif_tx_stop_queue(netdev_get_tx_queue(ndev, q_idx));

			if (atomic_read(&net_device->
				queue_sends[q_idx]) < 1)
				netif_tx_wake_queue(netdev_get_tx_queue(
						    ndev, q_idx));
		}
	} else if (ret == -EAGAIN) {
		netif_tx_stop_queue(netdev_get_tx_queue(
				    ndev, q_idx));
		if (atomic_read(&net_device->queue_sends[q_idx]) < 1) {
			netif_tx_wake_queue(netdev_get_tx_queue(
					    ndev, q_idx));
			ret = -ENOSPC;
		}
	} else {
		netdev_err(ndev, "Unable to send packet %p ret %d\n",
			   packet, ret);
	}

	return ret;
}
/* Move packet out of multi send data (msd), and clear msd */
static inline void move_pkt_msd(struct hv_netvsc_packet **msd_send,
				struct sk_buff **msd_skb,
				struct multi_send_data *msdp)
{
	*msd_skb = msdp->skb;
	*msd_send = msdp->pkt;
	msdp->skb = NULL;
	msdp->pkt = NULL;
	msdp->count = 0;
}
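/* Main transmit entry point: small packets are copied into a shared
 * send-buffer section and batched while skb->xmit_more is set; anything
 * that does not fit is sent with GPA page buffers.
 */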
int netvsc_send(struct hv_device *device,
		struct hv_netvsc_packet *packet,
		struct rndis_message *rndis_msg,
		struct hv_page_buffer **pb,
		struct sk_buff *skb)
{
	struct netvsc_device *net_device;
	int ret = 0, m_ret = 0;
	struct vmbus_channel *out_channel;
	u16 q_idx = packet->q_idx;
	u32 pktlen = packet->total_data_buflen, msd_len = 0;
	unsigned int section_index = NETVSC_INVALID_INDEX;
	struct multi_send_data *msdp;
	struct hv_netvsc_packet *msd_send = NULL, *cur_send = NULL;
	struct sk_buff *msd_skb = NULL;
	bool try_batch;
	bool xmit_more = (skb != NULL) ? skb->xmit_more : false;

	net_device = get_outbound_net_device(device);
	if (!net_device)
		return -ENODEV;

	out_channel = net_device->chn_table[q_idx];

	packet->send_buf_index = NETVSC_INVALID_INDEX;
	packet->cp_partial = false;

	/* Send control message directly without accessing msd (Multi-Send
	 * Data) field which may be changed during data packet processing.
	 */
	if (!skb) {
		cur_send = packet;
		goto send_now;
	}

	msdp = &net_device->msd[q_idx];

	/* batch packets in send buffer if possible */
	if (msdp->pkt)
		msd_len = msdp->pkt->total_data_buflen;

	try_batch = (skb != NULL) && msd_len > 0 && msdp->count <
		    net_device->max_pkt;

	if (try_batch && msd_len + pktlen + net_device->pkt_align <
	    net_device->send_section_size) {
		section_index = msdp->pkt->send_buf_index;

	} else if (try_batch && msd_len + packet->rmsg_size <
		   net_device->send_section_size) {
		section_index = msdp->pkt->send_buf_index;
		packet->cp_partial = true;

	} else if ((skb != NULL) && pktlen + net_device->pkt_align <
		   net_device->send_section_size) {
		section_index = netvsc_get_next_send_section(net_device);
		if (section_index != NETVSC_INVALID_INDEX) {
			move_pkt_msd(&msd_send, &msd_skb, msdp);
			msd_len = 0;
		}
	}

	if (section_index != NETVSC_INVALID_INDEX) {
		netvsc_copy_to_send_buf(net_device,
					section_index, msd_len,
					packet, rndis_msg, pb, skb);

		packet->send_buf_index = section_index;

		if (packet->cp_partial) {
			packet->page_buf_cnt -= packet->rmsg_pgcnt;
			packet->total_data_buflen = msd_len + packet->rmsg_size;
		} else {
			packet->page_buf_cnt = 0;
			packet->total_data_buflen += msd_len;
		}

		if (msdp->skb)
			dev_kfree_skb_any(msdp->skb);

		if (xmit_more && !packet->cp_partial) {
			msdp->skb = skb;
			msdp->pkt = packet;
			msdp->count++;
		} else {
			cur_send = packet;
			msdp->skb = NULL;
			msdp->pkt = NULL;
			msdp->count = 0;
		}
	} else {
		move_pkt_msd(&msd_send, &msd_skb, msdp);
		cur_send = packet;
	}

	if (msd_send) {
		m_ret = netvsc_send_pkt(msd_send, net_device, NULL, msd_skb);
		if (m_ret != 0) {
			netvsc_free_send_slot(net_device,
					      msd_send->send_buf_index);
			dev_kfree_skb_any(msd_skb);
		}
	}

send_now:
	if (cur_send)
		ret = netvsc_send_pkt(cur_send, net_device, pb, skb);

	if (ret != 0 && section_index != NETVSC_INVALID_INDEX)
		netvsc_free_send_slot(net_device, section_index);

	return ret;
}
static void netvsc_send_recv_completion(struct hv_device *device,
					struct vmbus_channel *channel,
					struct netvsc_device *net_device,
					u64 transaction_id, u32 status)
{
	struct nvsp_message recvcompMessage;
	int retries = 0;
	int ret;
	struct net_device *ndev = net_device->ndev;

	recvcompMessage.hdr.msg_type =
				NVSP_MSG1_TYPE_SEND_RNDIS_PKT_COMPLETE;

	recvcompMessage.msg.v1_msg.send_rndis_pkt_complete.status = status;

retry_send_cmplt:
	/* Send the completion */
	ret = vmbus_sendpacket(channel, &recvcompMessage,
			       sizeof(struct nvsp_message), transaction_id,
			       VM_PKT_COMP, 0);
	if (ret == 0) {
		/* success */
	} else if (ret == -EAGAIN) {
		/* no more room...wait a bit and attempt to retry 3 times */
		retries++;
		netdev_err(ndev, "unable to send receive completion pkt"
			   " (tid %llx)...retrying %d\n", transaction_id, retries);

		if (retries < 4) {
			udelay(100);
			goto retry_send_cmplt;
		} else {
			netdev_err(ndev, "unable to send receive "
				   "completion pkt (tid %llx)...give up retrying\n",
				   transaction_id);
		}
	} else {
		netdev_err(ndev, "unable to send receive "
			   "completion pkt - %llx\n", transaction_id);
	}
}
static void netvsc_receive(struct netvsc_device *net_device,
			   struct vmbus_channel *channel,
			   struct hv_device *device,
			   struct vmpacket_descriptor *packet)
{
	struct vmtransfer_page_packet_header *vmxferpage_packet;
	struct nvsp_message *nvsp_packet;
	struct hv_netvsc_packet nv_pkt;
	struct hv_netvsc_packet *netvsc_packet = &nv_pkt;
	u32 status = NVSP_STAT_SUCCESS;
	int i;
	int count = 0;
	struct net_device *ndev;
	void *data;

	ndev = net_device->ndev;

	/*
	 * All inbound packets other than send completion should be xfer page
	 * packet
	 */
	if (packet->type != VM_PKT_DATA_USING_XFER_PAGES) {
		netdev_err(ndev, "Unknown packet type received - %d\n",
			   packet->type);
		return;
	}

	nvsp_packet = (struct nvsp_message *)((unsigned long)packet +
					      (packet->offset8 << 3));

	/* Make sure this is a valid nvsp packet */
	if (nvsp_packet->hdr.msg_type !=
	    NVSP_MSG1_TYPE_SEND_RNDIS_PKT) {
		netdev_err(ndev, "Unknown nvsp packet type received-"
			   " %d\n", nvsp_packet->hdr.msg_type);
		return;
	}

	vmxferpage_packet = (struct vmtransfer_page_packet_header *)packet;

	if (vmxferpage_packet->xfer_pageset_id != NETVSC_RECEIVE_BUFFER_ID) {
		netdev_err(ndev, "Invalid xfer page set id - "
			   "expecting %x got %x\n", NETVSC_RECEIVE_BUFFER_ID,
			   vmxferpage_packet->xfer_pageset_id);
		return;
	}

	count = vmxferpage_packet->range_cnt;

	/* Each range represents 1 RNDIS pkt that contains 1 ethernet frame */
	for (i = 0; i < count; i++) {
		/* Initialize the netvsc packet */
		data = (void *)((unsigned long)net_device->
			recv_buf + vmxferpage_packet->ranges[i].byte_offset);
		netvsc_packet->total_data_buflen =
			vmxferpage_packet->ranges[i].byte_count;

		/* Pass it to the upper layer */
		status = rndis_filter_receive(device, netvsc_packet, &data,
					      channel);
	}

	netvsc_send_recv_completion(device, channel, net_device,
				    vmxferpage_packet->d.trans_id, status);
}
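/* Copy the send indirection table supplied by the host; it is used to
 * pick the transmit channel for a given hash value.
 */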
static void netvsc_send_table(struct hv_device *hdev,
			      struct nvsp_message *nvmsg)
{
	struct netvsc_device *nvscdev;
	struct net_device *ndev;
	int i;
	u32 count, *tab;

	nvscdev = get_outbound_net_device(hdev);
	if (!nvscdev)
		return;
	ndev = nvscdev->ndev;

	count = nvmsg->msg.v5_msg.send_table.count;
	if (count != VRSS_SEND_TAB_SIZE) {
		netdev_err(ndev, "Received wrong send-table size:%u\n", count);
		return;
	}

	tab = (u32 *)((unsigned long)&nvmsg->msg.v5_msg.send_table +
		      nvmsg->msg.v5_msg.send_table.offset);

	for (i = 0; i < count; i++)
		nvscdev->send_table[i] = tab[i];
}
static void netvsc_send_vf(struct netvsc_device *nvdev,
			   struct nvsp_message *nvmsg)
{
	nvdev->vf_alloc = nvmsg->msg.v4_msg.vf_assoc.allocated;
	nvdev->vf_serial = nvmsg->msg.v4_msg.vf_assoc.serial;
}

static inline void netvsc_receive_inband(struct hv_device *hdev,
					 struct netvsc_device *nvdev,
					 struct nvsp_message *nvmsg)
{
	switch (nvmsg->hdr.msg_type) {
	case NVSP_MSG5_TYPE_SEND_INDIRECTION_TABLE:
		netvsc_send_table(hdev, nvmsg);
		break;

	case NVSP_MSG4_TYPE_SEND_VF_ASSOCIATION:
		netvsc_send_vf(nvdev, nvmsg);
		break;
	}
}
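/* Channel callback: drain the VMBus ring buffer, dispatching completion,
 * transfer-page and inband packets, and grow the receive buffer on
 * -ENOBUFS when a packet is larger than NETVSC_PACKET_SIZE.
 */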
void netvsc_channel_cb(void *context)
{
	int ret;
	struct vmbus_channel *channel = (struct vmbus_channel *)context;
	struct hv_device *device;
	struct netvsc_device *net_device;
	u32 bytes_recvd;
	u64 request_id;
	struct vmpacket_descriptor *desc;
	unsigned char *buffer;
	int bufferlen = NETVSC_PACKET_SIZE;
	struct net_device *ndev;
	struct nvsp_message *nvmsg;

	if (channel->primary_channel != NULL)
		device = channel->primary_channel->device_obj;
	else
		device = channel->device_obj;

	net_device = get_inbound_net_device(device);
	if (!net_device)
		return;
	ndev = net_device->ndev;
	buffer = get_per_channel_state(channel);

	do {
		ret = vmbus_recvpacket_raw(channel, buffer, bufferlen,
					   &bytes_recvd, &request_id);
		if (ret == 0) {
			if (bytes_recvd > 0) {
				desc = (struct vmpacket_descriptor *)buffer;
				nvmsg = (struct nvsp_message *)((unsigned long)
					desc + (desc->offset8 << 3));
				switch (desc->type) {
				case VM_PKT_COMP:
					netvsc_send_completion(net_device,
							       channel,
							       device, desc);
					break;

				case VM_PKT_DATA_USING_XFER_PAGES:
					netvsc_receive(net_device, channel,
						       device, desc);
					break;

				case VM_PKT_DATA_INBAND:
					netvsc_receive_inband(device,
							      net_device, nvmsg);
					break;

				default:
					netdev_err(ndev,
						   "unhandled packet type %d, "
						   "tid %llx len %d\n",
						   desc->type, request_id,
						   bytes_recvd);
					break;
				}
			} else {
				/*
				 * We are done for this pass.
				 */
				break;
			}
		} else if (ret == -ENOBUFS) {
			if (bufferlen > NETVSC_PACKET_SIZE)
				kfree(buffer);
			/* Handle large packet */
			buffer = kmalloc(bytes_recvd, GFP_ATOMIC);
			if (buffer == NULL) {
				/* Try again next time around */
				netdev_err(ndev,
					   "unable to allocate buffer of size "
					   "(%d)!!\n", bytes_recvd);
				break;
			}
			bufferlen = bytes_recvd;
		}
	} while (1);

	if (bufferlen > NETVSC_PACKET_SIZE)
		kfree(buffer);
}
/*
 * netvsc_device_add - Callback when the device belonging to this
 * driver is added
 */
int netvsc_device_add(struct hv_device *device, void *additional_info)
{
	int ret = 0;
	int ring_size =
	((struct netvsc_device_info *)additional_info)->ring_size;
	struct netvsc_device *net_device;
	struct net_device *ndev;

	net_device = alloc_net_device(device);
	if (!net_device)
		return -ENOMEM;

	net_device->ring_size = ring_size;

	/*
	 * Coming into this function, struct net_device * is
	 * registered as the driver private data.
	 * In alloc_net_device(), we register struct netvsc_device *
	 * as the driver private data and stash away struct net_device *
	 * in struct netvsc_device *.
	 */
	ndev = net_device->ndev;

	/* Add netvsc_device context to netvsc_device */
	net_device->nd_ctx = netdev_priv(ndev);

	/* Initialize the NetVSC channel extension */
	init_completion(&net_device->channel_init_wait);

	set_per_channel_state(device->channel, net_device->cb_buffer);

	/* Open the channel */
	ret = vmbus_open(device->channel, ring_size * PAGE_SIZE,
			 ring_size * PAGE_SIZE, NULL, 0,
			 netvsc_channel_cb, device->channel);
	if (ret != 0) {
		netdev_err(ndev, "unable to open channel: %d\n", ret);
		goto cleanup;
	}

	/* Channel is opened */
	pr_info("hv_netvsc channel opened successfully\n");

	net_device->chn_table[0] = device->channel;

	/* Connect with the NetVsp */
	ret = netvsc_connect_vsp(device);
	if (ret != 0) {
		netdev_err(ndev,
			   "unable to connect to NetVSP - %d\n", ret);
		goto close;
	}

	return ret;

close:
	/* Now, we can close the channel safely */
	vmbus_close(device->channel);

cleanup:
	free_netvsc_device(net_device);
	return ret;
}