ibmvnic: map L2/L3/L4 header descriptors to firmware
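
When firmware requests L2/L3/L4 packet headers for a frame (indicated in
adapter->tx_rx_desc_req), build header descriptors from the skb, place
them in an indirect descriptor array behind the TX descriptor, map that
array for DMA, and submit it with the H_SEND_SUB_CRQ_INDIRECT hcall
instead of sending a single descriptor with H_SEND_SUB_CRQ.  The DMA
mapping is released when the TX completion for the buffer is processed.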
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index 6e9e16e..4e97e76 100644
@@ -61,6 +61,7 @@
 #include <linux/proc_fs.h>
 #include <linux/in.h>
 #include <linux/ip.h>
+#include <linux/ipv6.h>
 #include <linux/irq.h>
 #include <linux/kthread.h>
 #include <linux/seq_file.h>
@@ -94,6 +95,7 @@ static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *);
 static int ibmvnic_send_crq(struct ibmvnic_adapter *, union ibmvnic_crq *);
 static int send_subcrq(struct ibmvnic_adapter *adapter, u64 remote_handle,
                       union sub_crq *sub_crq);
+static int send_subcrq_indirect(struct ibmvnic_adapter *, u64, u64, u64);
 static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance);
 static int enable_scrq_irq(struct ibmvnic_adapter *,
                           struct ibmvnic_sub_crq_queue *);
@@ -561,10 +563,141 @@ static int ibmvnic_close(struct net_device *netdev)
        return 0;
 }
 
+/**
+ * build_hdr_data - creates L2/L3/L4 header data buffer
+ * @hdr_field: bitfield determining needed headers
+ * @skb: socket buffer
+ * @hdr_len: array to be filled with the individual header lengths
+ * @hdr_data: buffer to hold the assembled header data
+ *
+ * Reads hdr_field to determine which headers are needed by firmware and
+ * copies them, back to back, into hdr_data.  Saves the individual header
+ * lengths in hdr_len for later use when building the header descriptors.
+ *
+ * Return: total length of the header data placed in hdr_data.
+ */
+static int build_hdr_data(u8 hdr_field, struct sk_buff *skb,
+                         int *hdr_len, u8 *hdr_data)
+{
+       int len = 0;
+       u8 *hdr;
+
+       hdr_len[0] = sizeof(struct ethhdr);
+
+       if (skb->protocol == htons(ETH_P_IP)) {
+               hdr_len[1] = ip_hdr(skb)->ihl * 4;
+               if (ip_hdr(skb)->protocol == IPPROTO_TCP)
+                       hdr_len[2] = tcp_hdrlen(skb);
+               else if (ip_hdr(skb)->protocol == IPPROTO_UDP)
+                       hdr_len[2] = sizeof(struct udphdr);
+       } else if (skb->protocol == htons(ETH_P_IPV6)) {
+               hdr_len[1] = sizeof(struct ipv6hdr);
+               if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
+                       hdr_len[2] = tcp_hdrlen(skb);
+               else if (ipv6_hdr(skb)->nexthdr == IPPROTO_UDP)
+                       hdr_len[2] = sizeof(struct udphdr);
+       }
+
+       memset(hdr_data, 0, 120);
+       if ((hdr_field >> 6) & 1) {
+               hdr = skb_mac_header(skb);
+               memcpy(hdr_data, hdr, hdr_len[0]);
+               len += hdr_len[0];
+       }
+
+       if ((hdr_field >> 5) & 1) {
+               hdr = skb_network_header(skb);
+               memcpy(hdr_data + len, hdr, hdr_len[1]);
+               len += hdr_len[1];
+       }
+
+       if ((hdr_field >> 4) & 1) {
+               hdr = skb_transport_header(skb);
+               memcpy(hdr_data + len, hdr, hdr_len[2]);
+               len += hdr_len[2];
+       }
+       return len;
+}
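+
+/* For example, a TCP/IPv4 skb with no IP or TCP options and all three
+ * header bits set in hdr_field yields hdr_len = {14, 20, 20}: 54 bytes
+ * of headers are copied into hdr_data and 54 is returned.
+ */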
+
+/**
+ * create_hdr_descs - create header and header extension descriptors
+ * @hdr_field: bitfield determining needed headers
+ * @hdr_data: buffer containing header data
+ * @len: length of data buffer
+ * @hdr_len: array of individual header lengths
+ * @scrq_arr: descriptor array
+ *
+ * Creates header and, if needed, header extension descriptors and places
+ * them in the descriptor array, scrq_arr.
+ *
+ * Return: the number of descriptors placed in scrq_arr.
+ */
+static int create_hdr_descs(u8 hdr_field, u8 *hdr_data, int len, int *hdr_len,
+                            union sub_crq *scrq_arr)
+{
+       union sub_crq hdr_desc;
+       int tmp_len = len;
+       int num_descs = 0;
+       u8 *data, *cur;
+       int tmp;
+
+       while (tmp_len > 0) {
+               cur = hdr_data + len - tmp_len;
+
+               memset(&hdr_desc, 0, sizeof(hdr_desc));
+               if (cur != hdr_data) {
+                       data = hdr_desc.hdr_ext.data;
+                       tmp = tmp_len > 29 ? 29 : tmp_len;
+                       hdr_desc.hdr_ext.first = IBMVNIC_CRQ_CMD;
+                       hdr_desc.hdr_ext.type = IBMVNIC_HDR_EXT_DESC;
+                       hdr_desc.hdr_ext.len = tmp;
+               } else {
+                       data = hdr_desc.hdr.data;
+                       tmp = tmp_len > 24 ? 24 : tmp_len;
+                       hdr_desc.hdr.first = IBMVNIC_CRQ_CMD;
+                       hdr_desc.hdr.type = IBMVNIC_HDR_DESC;
+                       hdr_desc.hdr.len = tmp;
+                       hdr_desc.hdr.l2_len = (u8)hdr_len[0];
+                       hdr_desc.hdr.l3_len = cpu_to_be16((u16)hdr_len[1]);
+                       hdr_desc.hdr.l4_len = (u8)hdr_len[2];
+                       hdr_desc.hdr.flag = hdr_field << 1;
+               }
+               memcpy(data, cur, tmp);
+               tmp_len -= tmp;
+               *scrq_arr = hdr_desc;
+               scrq_arr++;
+               num_descs++;
+       }
+
+       return num_descs;
+}
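+
+/* For example, a 54-byte header buffer becomes three descriptors: one
+ * header descriptor carrying the first 24 bytes, then two extension
+ * descriptors carrying 29 bytes and 1 byte, so 3 is returned.
+ */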
+
+/**
+ * build_hdr_descs_arr - build a header descriptor array
+ * @txbuff: tx buffer containing the skb and the descriptor array to fill
+ * @num_entries: running count of descriptors to be sent, updated in place
+ * @hdr_field: bit field determining which headers will be sent
+ *
+ * This function will build a TX descriptor array with applicable
+ * L2/L3/L4 packet header descriptors to be sent by send_subcrq_indirect.
+ */
+static void build_hdr_descs_arr(struct ibmvnic_tx_buff *txbuff,
+                               int *num_entries, u8 hdr_field)
+{
+       int hdr_len[3] = {0, 0, 0};
+       int tot_len;
+       u8 *hdr_data = txbuff->hdr_data;
+
+       tot_len = build_hdr_data(hdr_field, txbuff->skb, hdr_len,
+                                txbuff->hdr_data);
+       *num_entries += create_hdr_descs(hdr_field, hdr_data, tot_len, hdr_len,
+                                        txbuff->indir_arr + 1);
+}
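+
+/* Continuing the example above: ibmvnic_xmit starts num_entries at 1 for
+ * the TX descriptor it places in indir_arr[0], so three header
+ * descriptors behind it bring num_entries to 4.
+ */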
+
 static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
 {
        struct ibmvnic_adapter *adapter = netdev_priv(netdev);
        int queue_num = skb_get_queue_mapping(skb);
+       u8 *hdrs = (u8 *)&adapter->tx_rx_desc_req;
        struct device *dev = &adapter->vdev->dev;
        struct ibmvnic_tx_buff *tx_buff = NULL;
        struct ibmvnic_tx_pool *tx_pool;
@@ -579,6 +712,7 @@ static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
        unsigned long lpar_rc;
        union sub_crq tx_crq;
        unsigned int offset;
+       int num_entries = 1;
        unsigned char *dst;
        u64 *handle_array;
        int index = 0;
@@ -644,11 +778,34 @@ static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
                        tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_UDP;
        }
 
-       if (skb->ip_summed == CHECKSUM_PARTIAL)
+       if (skb->ip_summed == CHECKSUM_PARTIAL) {
                tx_crq.v1.flags1 |= IBMVNIC_TX_CHKSUM_OFFLOAD;
-
-       lpar_rc = send_subcrq(adapter, handle_array[0], &tx_crq);
-
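+               /* a different byte of tx_rx_desc_req applies when
+                * checksum offload is in use
+                */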
+               hdrs += 2;
+       }
+       /* determine if L2/L3/L4 headers are sent to firmware */
+       if ((*hdrs >> 7) & 1 &&
+           (skb->protocol == htons(ETH_P_IP) ||
+            skb->protocol == htons(ETH_P_IPV6))) {
+               build_hdr_descs_arr(tx_buff, &num_entries, *hdrs);
+               tx_crq.v1.n_crq_elem = num_entries;
+               tx_buff->indir_arr[0] = tx_crq;
+               tx_buff->indir_dma = dma_map_single(dev, tx_buff->indir_arr,
+                                                   sizeof(tx_buff->indir_arr),
+                                                   DMA_TO_DEVICE);
+               if (dma_mapping_error(dev, tx_buff->indir_dma)) {
+                       if (!firmware_has_feature(FW_FEATURE_CMO))
+                               dev_err(dev, "tx: unable to map descriptor array\n");
+                       tx_map_failed++;
+                       tx_dropped++;
+                       ret = NETDEV_TX_BUSY;
+                       goto out;
+               }
+               lpar_rc = send_subcrq_indirect(adapter, handle_array[0],
+                                              (u64)tx_buff->indir_dma,
+                                              (u64)num_entries);
+       } else {
+               lpar_rc = send_subcrq(adapter, handle_array[0], &tx_crq);
+       }
        if (lpar_rc != H_SUCCESS) {
                dev_err(dev, "tx failed with code %ld\n", lpar_rc);
 
@@ -1159,6 +1316,7 @@ static int ibmvnic_complete_tx(struct ibmvnic_adapter *adapter,
        union sub_crq *next;
        int index;
        int i, j;
+       u8 first;
 
 restart_loop:
        while (pending_scrq(adapter, scrq)) {
@@ -1181,6 +1339,13 @@ restart_loop:
                                txbuff->data_dma[j] = 0;
                                txbuff->used_bounce = false;
                        }
+                       /* if the sub_crq was sent indirectly, release the
+                        * descriptor array mapping and clear the flag so a
+                        * reused buffer is not unmapped again
+                        */
+                       first = txbuff->indir_arr[0].generic.first;
+                       if (first == IBMVNIC_CRQ_CMD) {
+                               dma_unmap_single(dev, txbuff->indir_dma,
+                                                sizeof(txbuff->indir_arr),
+                                                DMA_TO_DEVICE);
+                               txbuff->indir_arr[0].generic.first = 0;
+                       }
 
                        if (txbuff->last_frag)
                                dev_kfree_skb_any(txbuff->skb);
@@ -1494,6 +1659,28 @@ static int send_subcrq(struct ibmvnic_adapter *adapter, u64 remote_handle,
        return rc;
 }
 
+/* Send num_entries sub-CRQ descriptors, starting at DMA address ioba, to
+ * the queue identified by remote_handle with a single hcall.  The caller
+ * maps the descriptor array (see ibmvnic_xmit) and must keep the mapping
+ * until the TX completion for the buffer is processed.
+ */
+static int send_subcrq_indirect(struct ibmvnic_adapter *adapter,
+                               u64 remote_handle, u64 ioba, u64 num_entries)
+{
+       unsigned int ua = adapter->vdev->unit_address;
+       struct device *dev = &adapter->vdev->dev;
+       int rc;
+
+       /* Make sure the hypervisor sees the complete request */
+       mb();
+       rc = plpar_hcall_norets(H_SEND_SUB_CRQ_INDIRECT, ua,
+                               cpu_to_be64(remote_handle),
+                               ioba, num_entries);
+
+       if (rc) {
+               if (rc == H_CLOSED)
+                       dev_warn(dev, "CRQ Queue closed\n");
+               dev_err(dev, "Send (indirect) error (rc=%d)\n", rc);
+       }
+
+       return rc;
+}
+
 static int ibmvnic_send_crq(struct ibmvnic_adapter *adapter,
                            union ibmvnic_crq *crq)
 {