/*
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/hyperv.h>
#include <linux/uio.h>
#include <linux/interrupt.h>

#include "hyperv_vmbus.h"

#define NUM_PAGES_SPANNED(addr, len) \
((PAGE_ALIGN(addr + len) >> PAGE_SHIFT) - (addr >> PAGE_SHIFT))
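
/*
 * Worked example (illustrative, 4 KiB pages): NUM_PAGES_SPANNED counts
 * every page the range touches, not just len / PAGE_SIZE. A buffer at
 * offset 0xff0 into a page with len 0x20 ends at 0x1010, so it spans
 * two pages even though len is only 32 bytes:
 *
 *   PAGE_ALIGN(0xff0 + 0x20) >> PAGE_SHIFT = 0x2000 >> 12 = 2
 *   0xff0 >> PAGE_SHIFT                    = 0
 *   NUM_PAGES_SPANNED                      = 2 - 0 = 2
 */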

/*
 * vmbus_setevent - Trigger an event notification on the specified
 * channel.
 */
static void vmbus_setevent(struct vmbus_channel *channel)
{
        struct hv_monitor_page *monitorpage;

        /*
         * For channels marked as in "low latency" mode
         * bypass the monitor page mechanism.
         */
        if ((channel->offermsg.monitor_allocated) &&
            (!channel->low_latency)) {
                /* Each u32 represents 32 channels */
                sync_set_bit(channel->offermsg.child_relid & 31,
                        (unsigned long *)vmbus_connection.send_int_page +
                        (channel->offermsg.child_relid >> 5));
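
                /*
                 * Illustrative arithmetic: for child_relid 70,
                 * 70 >> 5 = 2 selects the third 32-bit word of the
                 * interrupt page and 70 & 31 = 6 selects bit 6 in it.
                 */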

                /* Get the child to parent monitor page */
                monitorpage = vmbus_connection.monitor_pages[1];

                sync_set_bit(channel->monitor_bit,
                        (unsigned long *)&monitorpage->trigger_group
                                        [channel->monitor_grp].pending);
        } else {
                vmbus_set_event(channel);
        }
}

/*
 * vmbus_open - Open the specified channel.
 */
int vmbus_open(struct vmbus_channel *newchannel, u32 send_ringbuffer_size,
               u32 recv_ringbuffer_size, void *userdata, u32 userdatalen,
               void (*onchannelcallback)(void *context), void *context)
{
        struct vmbus_channel_open_channel *open_msg;
        struct vmbus_channel_msginfo *open_info = NULL;
        void *in, *out;
        unsigned long flags;
        int ret, err = 0;
        struct page *page;

        spin_lock_irqsave(&newchannel->lock, flags);
        if (newchannel->state == CHANNEL_OPEN_STATE) {
                newchannel->state = CHANNEL_OPENING_STATE;
        } else {
                spin_unlock_irqrestore(&newchannel->lock, flags);
                return -EINVAL;
        }
        spin_unlock_irqrestore(&newchannel->lock, flags);

        newchannel->onchannel_callback = onchannelcallback;
        newchannel->channel_callback_context = context;

        /* Allocate the ring buffer */
        page = alloc_pages_node(cpu_to_node(newchannel->target_cpu),
                                GFP_KERNEL|__GFP_ZERO,
                                get_order(send_ringbuffer_size +
                                recv_ringbuffer_size));

        if (!page)
                out = (void *)__get_free_pages(GFP_KERNEL|__GFP_ZERO,
                                               get_order(send_ringbuffer_size +
                                               recv_ringbuffer_size));
        else
                out = (void *)page_address(page);

        if (!out) {
                err = -ENOMEM;
                goto error_set_chnstate;
        }

        in = (void *)((unsigned long)out + send_ringbuffer_size);

        newchannel->ringbuffer_pages = out;
        newchannel->ringbuffer_pagecount = (send_ringbuffer_size +
                                           recv_ringbuffer_size) >> PAGE_SHIFT;

        ret = hv_ringbuffer_init(
                &newchannel->outbound, out, send_ringbuffer_size);

        if (ret != 0) {
                err = ret;
                goto error_free_pages;
        }

        ret = hv_ringbuffer_init(
                &newchannel->inbound, in, recv_ringbuffer_size);
        if (ret != 0) {
                err = ret;
                goto error_free_pages;
        }

        /* Establish the gpadl for the ring buffer */
        newchannel->ringbuffer_gpadlhandle = 0;

        ret = vmbus_establish_gpadl(newchannel,
                                    newchannel->outbound.ring_buffer,
                                    send_ringbuffer_size +
                                    recv_ringbuffer_size,
                                    &newchannel->ringbuffer_gpadlhandle);

        if (ret != 0) {
                err = ret;
                goto error_free_pages;
        }

        /* Create and init the channel open message */
        open_info = kmalloc(sizeof(*open_info) +
                            sizeof(struct vmbus_channel_open_channel),
                            GFP_KERNEL);
        if (!open_info) {
                err = -ENOMEM;
                goto error_free_gpadl;
        }

        init_completion(&open_info->waitevent);

        open_msg = (struct vmbus_channel_open_channel *)open_info->msg;
        open_msg->header.msgtype = CHANNELMSG_OPENCHANNEL;
        open_msg->openid = newchannel->offermsg.child_relid;
        open_msg->child_relid = newchannel->offermsg.child_relid;
        open_msg->ringbuffer_gpadlhandle = newchannel->ringbuffer_gpadlhandle;
        open_msg->downstream_ringbuffer_pageoffset = send_ringbuffer_size >>
                                                     PAGE_SHIFT;
        open_msg->target_vp = newchannel->target_vp;

        if (userdatalen > MAX_USER_DEFINED_BYTES) {
                err = -EINVAL;
                goto error_free_gpadl;
        }

        if (userdatalen)
                memcpy(open_msg->userdata, userdata, userdatalen);

        spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
        list_add_tail(&open_info->msglistentry,
                      &vmbus_connection.chn_msg_list);
        spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);

        ret = vmbus_post_msg(open_msg,
                             sizeof(struct vmbus_channel_open_channel));

        if (ret != 0) {
                err = ret;
                goto error_clean_msglist;
        }

        wait_for_completion(&open_info->waitevent);

        spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
        list_del(&open_info->msglistentry);
        spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);

        if (open_info->response.open_result.status) {
                err = -EAGAIN;
                goto error_free_gpadl;
        }

        newchannel->state = CHANNEL_OPENED_STATE;
        kfree(open_info);
        return 0;

error_clean_msglist:
        spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
        list_del(&open_info->msglistentry);
        spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);

error_free_gpadl:
        vmbus_teardown_gpadl(newchannel, newchannel->ringbuffer_gpadlhandle);
        kfree(open_info);
error_free_pages:
        free_pages((unsigned long)out,
                   get_order(send_ringbuffer_size + recv_ringbuffer_size));
error_set_chnstate:
        newchannel->state = CHANNEL_OPEN_STATE;
        return err;
}
EXPORT_SYMBOL_GPL(vmbus_open);
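
/*
 * Usage sketch (hypothetical driver code; the 16-page ring sizes,
 * "my_onchannelcallback" and "my_probe" are illustrative, not from
 * this file):
 *
 *	static void my_onchannelcallback(void *context)
 *	{
 *		struct vmbus_channel *chan = context;
 *
 *		// drain the inbound ring with vmbus_recvpacket() here
 *	}
 *
 *	static int my_probe(struct hv_device *dev,
 *			    const struct hv_vmbus_device_id *dev_id)
 *	{
 *		int ret;
 *
 *		ret = vmbus_open(dev->channel, 16 * PAGE_SIZE,
 *				 16 * PAGE_SIZE, NULL, 0,
 *				 my_onchannelcallback, dev->channel);
 *		if (ret)
 *			return ret;
 *		// device-specific setup follows
 *		return 0;
 *	}
 */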

/* Used for Hyper-V Socket: a guest client's connect() to the host */
int vmbus_send_tl_connect_request(const uuid_le *shv_guest_servie_id,
                                  const uuid_le *shv_host_servie_id)
{
        struct vmbus_channel_tl_connect_request conn_msg;

        memset(&conn_msg, 0, sizeof(conn_msg));
        conn_msg.header.msgtype = CHANNELMSG_TL_CONNECT_REQUEST;
        conn_msg.guest_endpoint_id = *shv_guest_servie_id;
        conn_msg.host_service_id = *shv_host_servie_id;

        return vmbus_post_msg(&conn_msg, sizeof(conn_msg));
}
EXPORT_SYMBOL_GPL(vmbus_send_tl_connect_request);
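
/*
 * Usage sketch (illustrative; the GUID values are placeholders the
 * caller must supply, not defined here):
 *
 *	uuid_le guest_id = ...;	// this endpoint's GUID
 *	uuid_le host_id = ...;	// the host service's GUID
 *	int ret = vmbus_send_tl_connect_request(&guest_id, &host_id);
 */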

/*
 * create_gpadl_header - Creates a gpadl for the specified buffer
 */
static int create_gpadl_header(void *kbuffer, u32 size,
                               struct vmbus_channel_msginfo **msginfo)
{
        int i;
        int pagecount;
        struct vmbus_channel_gpadl_header *gpadl_header;
        struct vmbus_channel_gpadl_body *gpadl_body;
        struct vmbus_channel_msginfo *msgheader;
        struct vmbus_channel_msginfo *msgbody = NULL;
        u32 msgsize;

        int pfnsum, pfncount, pfnleft, pfncurr, pfnsize;

        pagecount = size >> PAGE_SHIFT;

        /* do we need a gpadl body msg */
        pfnsize = MAX_SIZE_CHANNEL_MESSAGE -
                  sizeof(struct vmbus_channel_gpadl_header) -
                  sizeof(struct gpa_range);
        pfncount = pfnsize / sizeof(u64);
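
        /*
         * Illustrative numbers (not the real constants): if the space
         * left in a channel message allowed, say, 26 PFNs here, a 1 MiB
         * buffer (256 4-KiB pages) would put 26 PFNs in the header and
         * spread the remaining 230 across GPADL body messages below.
         */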

        if (pagecount > pfncount) {
                /* we need a gpadl body */
                /* fill in the header */
                msgsize = sizeof(struct vmbus_channel_msginfo) +
                          sizeof(struct vmbus_channel_gpadl_header) +
                          sizeof(struct gpa_range) + pfncount * sizeof(u64);
                msgheader = kzalloc(msgsize, GFP_KERNEL);
                if (!msgheader)
                        goto nomem;

                INIT_LIST_HEAD(&msgheader->submsglist);
                msgheader->msgsize = msgsize;

                gpadl_header = (struct vmbus_channel_gpadl_header *)
                        msgheader->msg;
                gpadl_header->rangecount = 1;
                gpadl_header->range_buflen = sizeof(struct gpa_range) +
                                         pagecount * sizeof(u64);
                gpadl_header->range[0].byte_offset = 0;
                gpadl_header->range[0].byte_count = size;
                for (i = 0; i < pfncount; i++)
                        gpadl_header->range[0].pfn_array[i] = slow_virt_to_phys(
                                kbuffer + PAGE_SIZE * i) >> PAGE_SHIFT;
                *msginfo = msgheader;

                pfnsum = pfncount;
                pfnleft = pagecount - pfncount;

                /* how many pfns can we fit */
                pfnsize = MAX_SIZE_CHANNEL_MESSAGE -
                          sizeof(struct vmbus_channel_gpadl_body);
                pfncount = pfnsize / sizeof(u64);

                /* fill in the body */
                while (pfnleft) {
                        if (pfnleft > pfncount)
                                pfncurr = pfncount;
                        else
                                pfncurr = pfnleft;

                        msgsize = sizeof(struct vmbus_channel_msginfo) +
                                  sizeof(struct vmbus_channel_gpadl_body) +
                                  pfncurr * sizeof(u64);
                        msgbody = kzalloc(msgsize, GFP_KERNEL);

                        if (!msgbody) {
                                struct vmbus_channel_msginfo *pos = NULL;
                                struct vmbus_channel_msginfo *tmp = NULL;
                                /*
                                 * Free up all the allocated messages.
                                 */
                                list_for_each_entry_safe(pos, tmp,
                                        &msgheader->submsglist,
                                        msglistentry) {

                                        list_del(&pos->msglistentry);
                                        kfree(pos);
                                }

                                goto nomem;
                        }

                        msgbody->msgsize = msgsize;
                        gpadl_body =
                                (struct vmbus_channel_gpadl_body *)msgbody->msg;

                        /*
                         * Gpadl is u32 and we are using a pointer which could
                         * be 64-bit.
                         * This is governed by the guest/host protocol and
                         * so the hypervisor guarantees that this is ok.
                         */
                        for (i = 0; i < pfncurr; i++)
                                gpadl_body->pfn[i] = slow_virt_to_phys(
                                        kbuffer + PAGE_SIZE * (pfnsum + i)) >>
                                        PAGE_SHIFT;

                        /* add to msg header */
                        list_add_tail(&msgbody->msglistentry,
                                      &msgheader->submsglist);
                        pfnsum += pfncurr;
                        pfnleft -= pfncurr;
                }
        } else {
                /* everything fits in a header */
                msgsize = sizeof(struct vmbus_channel_msginfo) +
                          sizeof(struct vmbus_channel_gpadl_header) +
                          sizeof(struct gpa_range) + pagecount * sizeof(u64);
                msgheader = kzalloc(msgsize, GFP_KERNEL);
                if (msgheader == NULL)
                        goto nomem;

                INIT_LIST_HEAD(&msgheader->submsglist);
                msgheader->msgsize = msgsize;

                gpadl_header = (struct vmbus_channel_gpadl_header *)
                        msgheader->msg;
                gpadl_header->rangecount = 1;
                gpadl_header->range_buflen = sizeof(struct gpa_range) +
                                         pagecount * sizeof(u64);
                gpadl_header->range[0].byte_offset = 0;
                gpadl_header->range[0].byte_count = size;
                for (i = 0; i < pagecount; i++)
                        gpadl_header->range[0].pfn_array[i] = slow_virt_to_phys(
                                kbuffer + PAGE_SIZE * i) >> PAGE_SHIFT;

                *msginfo = msgheader;
        }

        return 0;
nomem:
        kfree(msgheader);
        kfree(msgbody);
        return -ENOMEM;
}

/*
 * vmbus_establish_gpadl - Establish a GPADL for the specified buffer
 *
 * @channel: a channel
 * @kbuffer: from kmalloc or vmalloc
 * @size: page-size multiple
 * @gpadl_handle: filled in on success with the handle the host assigned
 */
int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer,
                          u32 size, u32 *gpadl_handle)
{
        struct vmbus_channel_gpadl_header *gpadlmsg;
        struct vmbus_channel_gpadl_body *gpadl_body;
        struct vmbus_channel_msginfo *msginfo = NULL;
        struct vmbus_channel_msginfo *submsginfo, *tmp;
        struct list_head *curr;
        u32 next_gpadl_handle;
        unsigned long flags;
        int ret = 0;

        next_gpadl_handle =
                (atomic_inc_return(&vmbus_connection.next_gpadl_handle) - 1);

        ret = create_gpadl_header(kbuffer, size, &msginfo);
        if (ret)
                return ret;

        init_completion(&msginfo->waitevent);

        gpadlmsg = (struct vmbus_channel_gpadl_header *)msginfo->msg;
        gpadlmsg->header.msgtype = CHANNELMSG_GPADL_HEADER;
        gpadlmsg->child_relid = channel->offermsg.child_relid;
        gpadlmsg->gpadl = next_gpadl_handle;

        spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
        list_add_tail(&msginfo->msglistentry,
                      &vmbus_connection.chn_msg_list);

        spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);

        ret = vmbus_post_msg(gpadlmsg, msginfo->msgsize -
                             sizeof(*msginfo));
        if (ret != 0)
                goto cleanup;

        list_for_each(curr, &msginfo->submsglist) {
                submsginfo = (struct vmbus_channel_msginfo *)curr;
                gpadl_body =
                        (struct vmbus_channel_gpadl_body *)submsginfo->msg;

                gpadl_body->header.msgtype =
                        CHANNELMSG_GPADL_BODY;
                gpadl_body->gpadl = next_gpadl_handle;

                ret = vmbus_post_msg(gpadl_body,
                                     submsginfo->msgsize -
                                     sizeof(*submsginfo));
                if (ret != 0)
                        goto cleanup;
        }
        wait_for_completion(&msginfo->waitevent);

        /* At this point, we received the gpadl created msg */
        *gpadl_handle = gpadlmsg->gpadl;

cleanup:
        spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
        list_del(&msginfo->msglistentry);
        spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
        list_for_each_entry_safe(submsginfo, tmp, &msginfo->submsglist,
                                 msglistentry)
                kfree(submsginfo);

        kfree(msginfo);
        return ret;
}
EXPORT_SYMBOL_GPL(vmbus_establish_gpadl);
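
/*
 * Usage sketch (illustrative driver code; the buffer size and how the
 * handle is handed to the host are assumptions, not from this file):
 *
 *	u32 gpadl = 0;
 *	void *buf = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 2);
 *	int ret;
 *
 *	ret = vmbus_establish_gpadl(channel, buf, 4 * PAGE_SIZE, &gpadl);
 *	if (ret)
 *		goto err;
 *	// ... hand "gpadl" to the host in a driver-specific message ...
 *	vmbus_teardown_gpadl(channel, gpadl);
 */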

/*
 * vmbus_teardown_gpadl - Teardown the specified GPADL handle
 */
int vmbus_teardown_gpadl(struct vmbus_channel *channel, u32 gpadl_handle)
{
        struct vmbus_channel_gpadl_teardown *msg;
        struct vmbus_channel_msginfo *info;
        unsigned long flags;
        int ret;

        info = kmalloc(sizeof(*info) +
                       sizeof(struct vmbus_channel_gpadl_teardown), GFP_KERNEL);
        if (!info)
                return -ENOMEM;

        init_completion(&info->waitevent);

        msg = (struct vmbus_channel_gpadl_teardown *)info->msg;

        msg->header.msgtype = CHANNELMSG_GPADL_TEARDOWN;
        msg->child_relid = channel->offermsg.child_relid;
        msg->gpadl = gpadl_handle;

        spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
        list_add_tail(&info->msglistentry,
                      &vmbus_connection.chn_msg_list);
        spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
        ret = vmbus_post_msg(msg,
                             sizeof(struct vmbus_channel_gpadl_teardown));

        if (ret)
                goto post_msg_err;

        wait_for_completion(&info->waitevent);

post_msg_err:
        spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
        list_del(&info->msglistentry);
        spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);

        kfree(info);
        return ret;
}
EXPORT_SYMBOL_GPL(vmbus_teardown_gpadl);

static void reset_channel_cb(void *arg)
{
        struct vmbus_channel *channel = arg;

        channel->onchannel_callback = NULL;
}

static int vmbus_close_internal(struct vmbus_channel *channel)
{
        struct vmbus_channel_close_channel *msg;
        int ret;

        /*
         * process_chn_event(), running in the tasklet, can race
         * with vmbus_close_internal() in the case of SMP guest, e.g., when
         * the former is accessing channel->inbound.ring_buffer, the latter
         * could be freeing the ring_buffer pages.
         *
         * To resolve the race, we can serialize them by disabling the
         * tasklet when the latter is running here.
         */
        hv_event_tasklet_disable(channel);

        /*
         * In case a device driver's probe() fails (e.g.,
         * util_probe() -> vmbus_open() returns -ENOMEM) and the device is
         * rescinded later (e.g., we dynamically disable an Integrated Service
         * in Hyper-V Manager), the driver's remove() invokes vmbus_close():
         * here we should skip most of the below cleanup work.
         */
        if (channel->state != CHANNEL_OPENED_STATE) {
                ret = -EINVAL;
                goto out;
        }

        channel->state = CHANNEL_OPEN_STATE;
        channel->sc_creation_callback = NULL;
        /* Stop callback and cancel the timer asap */
        if (channel->target_cpu != get_cpu()) {
                put_cpu();
                smp_call_function_single(channel->target_cpu, reset_channel_cb,
                                         channel, true);
        } else {
                reset_channel_cb(channel);
                put_cpu();
        }

        /* Send a closing message */

        msg = &channel->close_msg.msg;

        msg->header.msgtype = CHANNELMSG_CLOSECHANNEL;
        msg->child_relid = channel->offermsg.child_relid;

        ret = vmbus_post_msg(msg, sizeof(struct vmbus_channel_close_channel));

        if (ret) {
                pr_err("Close failed: close post msg return is %d\n", ret);
                /*
                 * If we failed to post the close msg,
                 * it is perhaps better to leak memory.
                 */
                goto out;
        }

        /* Tear down the gpadl for the channel's ring buffer */
        if (channel->ringbuffer_gpadlhandle) {
                ret = vmbus_teardown_gpadl(channel,
                                           channel->ringbuffer_gpadlhandle);
                if (ret) {
                        pr_err("Close failed: teardown gpadl return %d\n", ret);
                        /*
                         * If we failed to teardown gpadl,
                         * it is perhaps better to leak memory.
                         */
                        goto out;
                }
        }

        /* Cleanup the ring buffers for this channel */
        hv_ringbuffer_cleanup(&channel->outbound);
        hv_ringbuffer_cleanup(&channel->inbound);

        free_pages((unsigned long)channel->ringbuffer_pages,
                   get_order(channel->ringbuffer_pagecount * PAGE_SIZE));

out:
        hv_event_tasklet_enable(channel);

        return ret;
}

/*
 * vmbus_close - Close the specified channel
 */
void vmbus_close(struct vmbus_channel *channel)
{
        struct list_head *cur, *tmp;
        struct vmbus_channel *cur_channel;

        if (channel->primary_channel != NULL) {
                /*
                 * We will only close sub-channels when
                 * the primary is closed.
                 */
                return;
        }
        /*
         * Close all the sub-channels first and then close the
         * primary channel.
         */
        list_for_each_safe(cur, tmp, &channel->sc_list) {
                cur_channel = list_entry(cur, struct vmbus_channel, sc_list);
                if (cur_channel->state != CHANNEL_OPENED_STATE)
                        continue;
                vmbus_close_internal(cur_channel);
        }
        /*
         * Now close the primary.
         */
        vmbus_close_internal(channel);
}
EXPORT_SYMBOL_GPL(vmbus_close);
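
/*
 * Usage sketch (illustrative): the mirror image of the vmbus_open()
 * sketch above; a driver's remove() typically just does:
 *
 *	vmbus_close(dev->channel);
 */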

int vmbus_sendpacket_ctl(struct vmbus_channel *channel, void *buffer,
                         u32 bufferlen, u64 requestid,
                         enum vmbus_packet_type type, u32 flags, bool kick_q)
{
        struct vmpacket_descriptor desc;
        u32 packetlen = sizeof(struct vmpacket_descriptor) + bufferlen;
        u32 packetlen_aligned = ALIGN(packetlen, sizeof(u64));
        struct kvec bufferlist[3];
        u64 aligned_data = 0;
        bool signal = false;
        int ret;
        bool lock = channel->acquire_ring_lock;
        int num_vecs = ((bufferlen != 0) ? 3 : 1);


        /* Setup the descriptor */
        desc.type = type; /* VmbusPacketTypeDataInBand; */
        desc.flags = flags; /* VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED; */
        /* in 8-bytes granularity */
        desc.offset8 = sizeof(struct vmpacket_descriptor) >> 3;
        desc.len8 = (u16)(packetlen_aligned >> 3);
        desc.trans_id = requestid;

        bufferlist[0].iov_base = &desc;
        bufferlist[0].iov_len = sizeof(struct vmpacket_descriptor);
        bufferlist[1].iov_base = buffer;
        bufferlist[1].iov_len = bufferlen;
        bufferlist[2].iov_base = &aligned_data;
        bufferlist[2].iov_len = (packetlen_aligned - packetlen);

        ret = hv_ringbuffer_write(&channel->outbound, bufferlist, num_vecs,
                                  &signal, lock, channel->signal_policy);

        /*
         * Signalling the host is conditional on many factors:
         * 1. The ring state changed from being empty to non-empty.
         *    This is tracked by the variable "signal".
         * 2. The variable kick_q tracks if more data will be placed
         *    on the ring. We will not signal if more data is
         *    to be placed.
         *
         * Based on the channel signal state, we will decide
         * which signaling policy will be applied.
         *
         * If we cannot write to the ring-buffer; signal the host
         * even if we may not have written anything. This is a rare
         * enough condition that it should not matter.
         * NOTE: in this case, the hvsock channel is an exception, because
         * it looks like the host side's hvsock implementation has a
         * throttling mechanism which can hurt the performance otherwise.
         */

        if (((ret == 0) && kick_q && signal) ||
            (ret && !is_hvsock_channel(channel)))
                vmbus_setevent(channel);

        return ret;
}
EXPORT_SYMBOL(vmbus_sendpacket_ctl);

/**
 * vmbus_sendpacket() - Send the specified buffer on the given channel
 * @channel: Pointer to vmbus_channel structure.
 * @buffer: Pointer to the buffer containing the data to send.
 * @bufferlen: Size of the data in the buffer.
 * @requestid: Identifier of the request
 * @type: Type of packet that is being sent e.g. negotiate, time
 * packet etc.
 *
 * Sends data in @buffer directly to hyper-v via the vmbus.
 * This will send the data unparsed to hyper-v.
 *
 * Mainly used by Hyper-V drivers.
 */
int vmbus_sendpacket(struct vmbus_channel *channel, void *buffer,
                     u32 bufferlen, u64 requestid,
                     enum vmbus_packet_type type, u32 flags)
{
        return vmbus_sendpacket_ctl(channel, buffer, bufferlen, requestid,
                                    type, flags, true);
}
EXPORT_SYMBOL(vmbus_sendpacket);
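
/*
 * Usage sketch (illustrative; "struct my_request", "MY_OP_QUERY" and the
 * request-id scheme are hypothetical): send a small in-band request and
 * ask the host for a completion packet.
 *
 *	struct my_request req = { .op = MY_OP_QUERY };
 *	int ret;
 *
 *	ret = vmbus_sendpacket(channel, &req, sizeof(req),
 *			       (unsigned long)&req, VM_PKT_DATA_INBAND,
 *			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
 */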

/*
 * vmbus_sendpacket_pagebuffer_ctl - Send a range of single-page buffer
 * packets using a GPADL Direct packet type. This interface allows you
 * to control notifying the host. This will be useful for sending
 * batched data. Also the sender can control the send flags
 * explicitly.
 */
int vmbus_sendpacket_pagebuffer_ctl(struct vmbus_channel *channel,
                                    struct hv_page_buffer pagebuffers[],
                                    u32 pagecount, void *buffer, u32 bufferlen,
                                    u64 requestid,
                                    u32 flags,
                                    bool kick_q)
{
        int ret;
        int i;
        struct vmbus_channel_packet_page_buffer desc;
        u32 descsize;
        u32 packetlen;
        u32 packetlen_aligned;
        struct kvec bufferlist[3];
        u64 aligned_data = 0;
        bool signal = false;
        bool lock = channel->acquire_ring_lock;

        if (pagecount > MAX_PAGE_BUFFER_COUNT)
                return -EINVAL;

        /*
         * Adjust the size down since vmbus_channel_packet_page_buffer is the
         * largest size we support
         */
        descsize = sizeof(struct vmbus_channel_packet_page_buffer) -
                   ((MAX_PAGE_BUFFER_COUNT - pagecount) *
                   sizeof(struct hv_page_buffer));
        packetlen = descsize + bufferlen;
        packetlen_aligned = ALIGN(packetlen, sizeof(u64));

        /* Setup the descriptor */
        desc.type = VM_PKT_DATA_USING_GPA_DIRECT;
        desc.flags = flags;
        desc.dataoffset8 = descsize >> 3; /* in 8-bytes granularity */
        desc.length8 = (u16)(packetlen_aligned >> 3);
        desc.transactionid = requestid;
        desc.rangecount = pagecount;

        for (i = 0; i < pagecount; i++) {
                desc.range[i].len = pagebuffers[i].len;
                desc.range[i].offset = pagebuffers[i].offset;
                desc.range[i].pfn = pagebuffers[i].pfn;
        }

        bufferlist[0].iov_base = &desc;
        bufferlist[0].iov_len = descsize;
        bufferlist[1].iov_base = buffer;
        bufferlist[1].iov_len = bufferlen;
        bufferlist[2].iov_base = &aligned_data;
        bufferlist[2].iov_len = (packetlen_aligned - packetlen);

        ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3,
                                  &signal, lock, channel->signal_policy);

        /*
         * Signalling the host is conditional on many factors:
         * 1. The ring state changed from being empty to non-empty.
         *    This is tracked by the variable "signal".
         * 2. The variable kick_q tracks if more data will be placed
         *    on the ring. We will not signal if more data is
         *    to be placed.
         *
         * Based on the channel signal state, we will decide
         * which signaling policy will be applied.
         *
         * If we cannot write to the ring-buffer; signal the host
         * even if we may not have written anything. This is a rare
         * enough condition that it should not matter.
         */

        if (((ret == 0) && kick_q && signal) || (ret))
                vmbus_setevent(channel);

        return ret;
}
EXPORT_SYMBOL_GPL(vmbus_sendpacket_pagebuffer_ctl);

/*
 * vmbus_sendpacket_pagebuffer - Send a range of single-page buffer
 * packets using a GPADL Direct packet type.
 */
int vmbus_sendpacket_pagebuffer(struct vmbus_channel *channel,
                                struct hv_page_buffer pagebuffers[],
                                u32 pagecount, void *buffer, u32 bufferlen,
                                u64 requestid)
{
        u32 flags = VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED;

        return vmbus_sendpacket_pagebuffer_ctl(channel, pagebuffers, pagecount,
                                               buffer, bufferlen, requestid,
                                               flags, true);
}
EXPORT_SYMBOL_GPL(vmbus_sendpacket_pagebuffer);
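
/*
 * Usage sketch (illustrative; "data", "hdr" and "req_id" are
 * hypothetical driver state): describe one page of payload by PFN,
 * roughly the way netvsc sends RNDIS messages.
 *
 *	struct hv_page_buffer pb[1];
 *	int ret;
 *
 *	pb[0].pfn = virt_to_phys(data) >> PAGE_SHIFT;
 *	pb[0].offset = offset_in_page(data);
 *	pb[0].len = data_len;
 *
 *	ret = vmbus_sendpacket_pagebuffer(channel, pb, 1,
 *					  &hdr, sizeof(hdr), req_id);
 */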

/*
 * vmbus_sendpacket_mpb_desc - Send a multi-page buffer packet
 * using a GPADL Direct packet type.
 * The buffer includes the vmbus descriptor.
 */
int vmbus_sendpacket_mpb_desc(struct vmbus_channel *channel,
                              struct vmbus_packet_mpb_array *desc,
                              u32 desc_size,
                              void *buffer, u32 bufferlen, u64 requestid)
{
        int ret;
        u32 packetlen;
        u32 packetlen_aligned;
        struct kvec bufferlist[3];
        u64 aligned_data = 0;
        bool signal = false;
        bool lock = channel->acquire_ring_lock;

        packetlen = desc_size + bufferlen;
        packetlen_aligned = ALIGN(packetlen, sizeof(u64));

        /* Setup the descriptor */
        desc->type = VM_PKT_DATA_USING_GPA_DIRECT;
        desc->flags = VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED;
        desc->dataoffset8 = desc_size >> 3; /* in 8-bytes granularity */
        desc->length8 = (u16)(packetlen_aligned >> 3);
        desc->transactionid = requestid;
        desc->rangecount = 1;

        bufferlist[0].iov_base = desc;
        bufferlist[0].iov_len = desc_size;
        bufferlist[1].iov_base = buffer;
        bufferlist[1].iov_len = bufferlen;
        bufferlist[2].iov_base = &aligned_data;
        bufferlist[2].iov_len = (packetlen_aligned - packetlen);

        ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3,
                                  &signal, lock, channel->signal_policy);

        if (ret == 0 && signal)
                vmbus_setevent(channel);

        return ret;
}
EXPORT_SYMBOL_GPL(vmbus_sendpacket_mpb_desc);

/*
 * vmbus_sendpacket_multipagebuffer - Send a multi-page buffer packet
 * using a GPADL Direct packet type.
 */
int vmbus_sendpacket_multipagebuffer(struct vmbus_channel *channel,
                                     struct hv_multipage_buffer *multi_pagebuffer,
                                     void *buffer, u32 bufferlen, u64 requestid)
{
        int ret;
        struct vmbus_channel_packet_multipage_buffer desc;
        u32 descsize;
        u32 packetlen;
        u32 packetlen_aligned;
        struct kvec bufferlist[3];
        u64 aligned_data = 0;
        bool signal = false;
        bool lock = channel->acquire_ring_lock;
        u32 pfncount = NUM_PAGES_SPANNED(multi_pagebuffer->offset,
                                         multi_pagebuffer->len);

        if (pfncount > MAX_MULTIPAGE_BUFFER_COUNT)
                return -EINVAL;

        /*
         * Adjust the size down since vmbus_channel_packet_multipage_buffer is
         * the largest size we support
         */
        descsize = sizeof(struct vmbus_channel_packet_multipage_buffer) -
                   ((MAX_MULTIPAGE_BUFFER_COUNT - pfncount) *
                   sizeof(u64));
        packetlen = descsize + bufferlen;
        packetlen_aligned = ALIGN(packetlen, sizeof(u64));


        /* Setup the descriptor */
        desc.type = VM_PKT_DATA_USING_GPA_DIRECT;
        desc.flags = VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED;
        desc.dataoffset8 = descsize >> 3; /* in 8-bytes granularity */
        desc.length8 = (u16)(packetlen_aligned >> 3);
        desc.transactionid = requestid;
        desc.rangecount = 1;

        desc.range.len = multi_pagebuffer->len;
        desc.range.offset = multi_pagebuffer->offset;

        memcpy(desc.range.pfn_array, multi_pagebuffer->pfn_array,
               pfncount * sizeof(u64));

        bufferlist[0].iov_base = &desc;
        bufferlist[0].iov_len = descsize;
        bufferlist[1].iov_base = buffer;
        bufferlist[1].iov_len = bufferlen;
        bufferlist[2].iov_base = &aligned_data;
        bufferlist[2].iov_len = (packetlen_aligned - packetlen);

        ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3,
                                  &signal, lock, channel->signal_policy);

        if (ret == 0 && signal)
                vmbus_setevent(channel);

        return ret;
}
EXPORT_SYMBOL_GPL(vmbus_sendpacket_multipagebuffer);

/**
 * vmbus_recvpacket() - Retrieve the user packet on the specified channel
 * @channel: Pointer to vmbus_channel structure.
 * @buffer: Pointer to the buffer you want to receive the data into.
 * @bufferlen: Maximum size of what the buffer will hold
 * @buffer_actual_len: The actual size of the data after it was received
 * @requestid: Identifier of the request
 *
 * Receives directly from the hyper-v vmbus and puts the data it received
 * into @buffer. This will receive the data unparsed from hyper-v.
 *
 * Mainly used by Hyper-V drivers.
 */
static inline int
__vmbus_recvpacket(struct vmbus_channel *channel, void *buffer,
                   u32 bufferlen, u32 *buffer_actual_len, u64 *requestid,
                   bool raw)
{
        int ret;
        bool signal = false;

        ret = hv_ringbuffer_read(&channel->inbound, buffer, bufferlen,
                                 buffer_actual_len, requestid, &signal, raw);

        if (signal)
                vmbus_setevent(channel);

        return ret;
}

int vmbus_recvpacket(struct vmbus_channel *channel, void *buffer,
                     u32 bufferlen, u32 *buffer_actual_len,
                     u64 *requestid)
{
        return __vmbus_recvpacket(channel, buffer, bufferlen,
                                  buffer_actual_len, requestid, false);
}
EXPORT_SYMBOL(vmbus_recvpacket);

/*
 * vmbus_recvpacket_raw - Retrieve the raw packet on the specified channel
 */
int vmbus_recvpacket_raw(struct vmbus_channel *channel, void *buffer,
                         u32 bufferlen, u32 *buffer_actual_len,
                         u64 *requestid)
{
        return __vmbus_recvpacket(channel, buffer, bufferlen,
                                  buffer_actual_len, requestid, true);
}
EXPORT_SYMBOL_GPL(vmbus_recvpacket_raw);
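
/*
 * Usage sketch (illustrative; the buffer size and "process_packet"
 * helper are hypothetical): the receive side of the in-band example
 * above, typically run from the channel callback. In this version,
 * vmbus_recvpacket() returns 0 with a zero actual length once the
 * inbound ring is empty.
 *
 *	u8 buf[256];
 *	u32 len = 0;
 *	u64 req_id;
 *
 *	while (vmbus_recvpacket(channel, buf, sizeof(buf),
 *				&len, &req_id) == 0 && len) {
 *		// "req_id" matches the trans_id the sender passed in
 *		process_packet(buf, len);
 *	}
 */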