Merge tag 'gfs2-4.8.fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/gfs2...
[cascardo/linux.git] / include / linux / hyperv.h
index e6ef571..cd184bd 100644 (file)
@@ -674,6 +674,11 @@ enum hv_signal_policy {
        HV_SIGNAL_POLICY_EXPLICIT,
 };
 
+enum hv_numa_policy {
+       HV_BALANCED = 0,
+       HV_LOCALIZED,
+};
+
 enum vmbus_device_type {
        HV_IDE = 0,
        HV_SCSI,
@@ -701,9 +706,6 @@ struct vmbus_device {
 };
 
 struct vmbus_channel {
-       /* Unique channel id */
-       int id;
-
        struct list_head listentry;
 
        struct hv_device *device_obj;
@@ -876,6 +878,18 @@ struct vmbus_channel {
         */
        bool low_latency;
 
+       /*
+        * NUMA distribution policy:
+        * We support two policies:
+        * 1) Balanced: Here all performance critical channels are
+        *    distributed evenly amongst all the NUMA nodes.
+        *    This policy will be the default policy.
+        * 2) Localized: All channels of a given instance of a
+        *    performance critical service will be assigned CPUs
+        *    within a selected NUMA node.
+        */
+       enum hv_numa_policy affinity_policy;
+
 };
 
 static inline void set_channel_lock_state(struct vmbus_channel *c, bool state)
@@ -895,6 +909,12 @@ static inline void set_channel_signal_state(struct vmbus_channel *c,
        c->signal_policy = policy;
 }
 
+static inline void set_channel_affinity_state(struct vmbus_channel *c,
+                                             enum hv_numa_policy policy)
+{
+       c->affinity_policy = policy;
+}
+
 static inline void set_channel_read_state(struct vmbus_channel *c, bool state)
 {
        c->batched_reading = state;
@@ -1291,6 +1311,27 @@ u64 hv_do_hypercall(u64 control, void *input, void *output);
        .guid = UUID_LE(0x44c4f61d, 0x4444, 0x4400, 0x9d, 0x52, \
                        0x80, 0x2e, 0x27, 0xed, 0xe1, 0x9f)
 
+/*
+ * Linux doesn't support these 3 devices: the first two are for
+ * Automatic Virtual Machine Activation, and the third is for
+ * Remote Desktop Virtualization.
+ * {f8e65716-3cb3-4a06-9a60-1889c5cccab5}
+ * {3375baf4-9e15-4b30-b765-67acb10d607b}
+ * {276aacf4-ac15-426c-98dd-7521ad3f01fe}
+ */
+
+#define HV_AVMA1_GUID \
+       .guid = UUID_LE(0xf8e65716, 0x3cb3, 0x4a06, 0x9a, 0x60, \
+                       0x18, 0x89, 0xc5, 0xcc, 0xca, 0xb5)
+
+#define HV_AVMA2_GUID \
+       .guid = UUID_LE(0x3375baf4, 0x9e15, 0x4b30, 0xb7, 0x65, \
+                       0x67, 0xac, 0xb1, 0x0d, 0x60, 0x7b)
+
+#define HV_RDV_GUID \
+       .guid = UUID_LE(0x276aacf4, 0xac15, 0x426c, 0x98, 0xdd, \
+                       0x75, 0x21, 0xad, 0x3f, 0x01, 0xfe)
+
 /*
  * Common header for Hyper-V ICs
  */
@@ -1379,6 +1420,15 @@ struct ictimesync_data {
        u8 flags;
 } __packed;
 
+struct ictimesync_ref_data {
+       u64 parenttime;
+       u64 vmreferencetime;
+       u8 flags;
+       char leapflags;
+       char stratum;
+       u8 reserved[3];
+} __packed;
+
 struct hyperv_service_callback {
        u8 msg_type;
        char *log_msg;
@@ -1469,23 +1519,31 @@ static inline struct vmpacket_descriptor *
 get_next_pkt_raw(struct vmbus_channel *channel)
 {
        struct hv_ring_buffer_info *ring_info = &channel->inbound;
-       u32 priv_read_loc = ring_info->priv_read_index;
+       u32 read_loc = ring_info->priv_read_index;
        void *ring_buffer = hv_get_ring_buffer(ring_info);
+       struct vmpacket_descriptor *cur_desc;
+       u32 packetlen;
        u32 dsize = ring_info->ring_datasize;
-       /*
-        * delta is the difference between what is available to read and
-        * what was already consumed in place. We commit read index after
-        * the whole batch is processed.
-        */
-       u32 delta = priv_read_loc >= ring_info->ring_buffer->read_index ?
-               priv_read_loc - ring_info->ring_buffer->read_index :
-               (dsize - ring_info->ring_buffer->read_index) + priv_read_loc;
+       u32 delta = read_loc - ring_info->ring_buffer->read_index;
        u32 bytes_avail_toread = (hv_get_bytes_to_read(ring_info) - delta);
 
        if (bytes_avail_toread < sizeof(struct vmpacket_descriptor))
                return NULL;
 
-       return ring_buffer + priv_read_loc;
+       if ((read_loc + sizeof(*cur_desc)) > dsize)
+               return NULL;
+
+       cur_desc = ring_buffer + read_loc;
+       packetlen = cur_desc->len8 << 3;
+
+       /*
+        * If the packet under consideration is wrapping around,
+        * return failure.
+        */
+       if ((read_loc + packetlen + VMBUS_PKT_TRAILER) > (dsize - 1))
+               return NULL;
+
+       return cur_desc;
 }
 
 /*
@@ -1497,14 +1555,16 @@ static inline void put_pkt_raw(struct vmbus_channel *channel,
                                struct vmpacket_descriptor *desc)
 {
        struct hv_ring_buffer_info *ring_info = &channel->inbound;
+       u32 read_loc = ring_info->priv_read_index;
        u32 packetlen = desc->len8 << 3;
        u32 dsize = ring_info->ring_datasize;
 
+       if ((read_loc + packetlen + VMBUS_PKT_TRAILER) > dsize)
+               BUG();
        /*
         * Include the packet trailer.
         */
        ring_info->priv_read_index += packetlen + VMBUS_PKT_TRAILER;
-       ring_info->priv_read_index %= dsize;
 }
 
 /*