ibmvnic: simplify and improve driver probe function
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index 6e9e16e..52b0c07 100644
@@ -61,6 +61,7 @@
 #include <linux/proc_fs.h>
 #include <linux/in.h>
 #include <linux/ip.h>
+#include <linux/ipv6.h>
 #include <linux/irq.h>
 #include <linux/kthread.h>
 #include <linux/seq_file.h>
@@ -88,12 +89,14 @@ MODULE_VERSION(IBMVNIC_DRIVER_VERSION);
 static int ibmvnic_version = IBMVNIC_INITIAL_VERSION;
 static int ibmvnic_remove(struct vio_dev *);
 static void release_sub_crqs(struct ibmvnic_adapter *);
+static void release_sub_crqs_no_irqs(struct ibmvnic_adapter *);
 static int ibmvnic_reset_crq(struct ibmvnic_adapter *);
 static int ibmvnic_send_crq_init(struct ibmvnic_adapter *);
 static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *);
 static int ibmvnic_send_crq(struct ibmvnic_adapter *, union ibmvnic_crq *);
 static int send_subcrq(struct ibmvnic_adapter *adapter, u64 remote_handle,
                       union sub_crq *sub_crq);
+static int send_subcrq_indirect(struct ibmvnic_adapter *, u64, u64, u64);
 static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance);
 static int enable_scrq_irq(struct ibmvnic_adapter *,
                           struct ibmvnic_sub_crq_queue *);
@@ -467,7 +470,8 @@ static int ibmvnic_open(struct net_device *netdev)
        crq.logical_link_state.link_state = IBMVNIC_LOGICAL_LNK_UP;
        ibmvnic_send_crq(adapter, &crq);
 
-       netif_start_queue(netdev);
+       netif_tx_start_all_queues(netdev);
+
        return 0;
 
 bounce_map_failed:
@@ -517,7 +521,7 @@ static int ibmvnic_close(struct net_device *netdev)
        for (i = 0; i < adapter->req_rx_queues; i++)
                napi_disable(&adapter->napi[i]);
 
-       netif_stop_queue(netdev);
+       netif_tx_stop_all_queues(netdev);
 
        if (adapter->bounce_buffer) {
                if (!dma_mapping_error(dev, adapter->bounce_buffer_dma)) {
@@ -561,10 +565,141 @@ static int ibmvnic_close(struct net_device *netdev)
        return 0;
 }
 
+/**
+ * build_hdr_data - creates L2/L3/L4 header data buffer
+ * @hdr_field: bitfield determining needed headers
+ * @skb: socket buffer
+ * @hdr_len: array of header lengths to be filled in
+ * @hdr_data: buffer to write the header data into
+ *
+ * Reads hdr_field to determine which headers are needed by firmware.
+ * Builds a buffer containing these headers.  Saves individual header
+ * lengths in hdr_len and returns the total length of the buffer; both
+ * are later used to build descriptors.
+ */
+static int build_hdr_data(u8 hdr_field, struct sk_buff *skb,
+                         int *hdr_len, u8 *hdr_data)
+{
+       int len = 0;
+       u8 *hdr;
+
+       hdr_len[0] = sizeof(struct ethhdr);
+
+       if (skb->protocol == htons(ETH_P_IP)) {
+               hdr_len[1] = ip_hdr(skb)->ihl * 4;
+               if (ip_hdr(skb)->protocol == IPPROTO_TCP)
+                       hdr_len[2] = tcp_hdrlen(skb);
+               else if (ip_hdr(skb)->protocol == IPPROTO_UDP)
+                       hdr_len[2] = sizeof(struct udphdr);
+       } else if (skb->protocol == htons(ETH_P_IPV6)) {
+               hdr_len[1] = sizeof(struct ipv6hdr);
+               if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
+                       hdr_len[2] = tcp_hdrlen(skb);
+               else if (ipv6_hdr(skb)->nexthdr == IPPROTO_UDP)
+                       hdr_len[2] = sizeof(struct udphdr);
+       }
+
+       memset(hdr_data, 0, 120);
+       if ((hdr_field >> 6) & 1) {
+               hdr = skb_mac_header(skb);
+               memcpy(hdr_data, hdr, hdr_len[0]);
+               len += hdr_len[0];
+       }
+
+       if ((hdr_field >> 5) & 1) {
+               hdr = skb_network_header(skb);
+               memcpy(hdr_data + len, hdr, hdr_len[1]);
+               len += hdr_len[1];
+       }
+
+       if ((hdr_field >> 4) & 1) {
+               hdr = skb_transport_header(skb);
+               memcpy(hdr_data + len, hdr, hdr_len[2]);
+               len += hdr_len[2];
+       }
+       return len;
+}
+
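
  For orientation: the hdr_field byte tested above uses bit 6 for the L2
  header, bit 5 for L3, and bit 4 for L4.  A standalone sketch of those
  bit tests (illustrative only, not driver code):

	#include <stdio.h>

	static void show_requested_headers(unsigned char hdr_field)
	{
		if ((hdr_field >> 6) & 1)
			printf("L2 (Ethernet) header requested\n");
		if ((hdr_field >> 5) & 1)
			printf("L3 (IP) header requested\n");
		if ((hdr_field >> 4) & 1)
			printf("L4 (TCP/UDP) header requested\n");
	}

	int main(void)
	{
		show_requested_headers(0x70);	/* prints all three lines */
		return 0;
	}
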
+/**
+ * create_hdr_descs - create header and header extension descriptors
+ * @hdr_field: bitfield determining needed headers
+ * @hdr_data: buffer containing header data
+ * @len: length of data buffer
+ * @hdr_len: array of individual header lengths
+ * @scrq_arr: descriptor array
+ *
+ * Creates header and, if needed, header extension descriptors and
+ * places them in the descriptor array, scrq_arr.
+ */
+static void create_hdr_descs(u8 hdr_field, u8 *hdr_data, int len, int *hdr_len,
+                            union sub_crq *scrq_arr)
+{
+       union sub_crq hdr_desc;
+       int tmp_len = len;
+       u8 *data, *cur;
+       int tmp;
+
+       while (tmp_len > 0) {
+               cur = hdr_data + len - tmp_len;
+
+               memset(&hdr_desc, 0, sizeof(hdr_desc));
+               if (cur != hdr_data) {
+                       data = hdr_desc.hdr_ext.data;
+                       tmp = tmp_len > 29 ? 29 : tmp_len;
+                       hdr_desc.hdr_ext.first = IBMVNIC_CRQ_CMD;
+                       hdr_desc.hdr_ext.type = IBMVNIC_HDR_EXT_DESC;
+                       hdr_desc.hdr_ext.len = tmp;
+               } else {
+                       data = hdr_desc.hdr.data;
+                       tmp = tmp_len > 24 ? 24 : tmp_len;
+                       hdr_desc.hdr.first = IBMVNIC_CRQ_CMD;
+                       hdr_desc.hdr.type = IBMVNIC_HDR_DESC;
+                       hdr_desc.hdr.len = tmp;
+                       hdr_desc.hdr.l2_len = (u8)hdr_len[0];
+                       hdr_desc.hdr.l3_len = cpu_to_be16((u16)hdr_len[1]);
+                       hdr_desc.hdr.l4_len = (u8)hdr_len[2];
+                       hdr_desc.hdr.flag = hdr_field << 1;
+               }
+               memcpy(data, cur, tmp);
+               tmp_len -= tmp;
+               *scrq_arr = hdr_desc;
+               scrq_arr++;
+       }
+}
+
+/**
+ * build_hdr_descs_arr - build a header descriptor array
+ * @txbuff: tx buffer containing the socket buffer and descriptor array
+ * @num_entries: pointer to the number of descriptors to be sent,
+ *               updated for any header descriptors added
+ * @hdr_field: bit field determining which headers will be sent
+ *
+ * This function will build a TX descriptor array with applicable
+ * L2/L3/L4 packet header descriptors to be sent by send_subcrq_indirect.
+ */
+static void build_hdr_descs_arr(struct ibmvnic_tx_buff *txbuff,
+                               int *num_entries, u8 hdr_field)
+{
+       int hdr_len[3] = {0, 0, 0};
+       int tot_len, len;
+       u8 *hdr_data = txbuff->hdr_data;
+
+       tot_len = build_hdr_data(hdr_field, txbuff->skb, hdr_len,
+                                txbuff->hdr_data);
+       len = tot_len;
+       len -= 24;
+       if (len > 0)
+               *num_entries += len % 29 ? len / 29 + 1 : len / 29;
+       create_hdr_descs(hdr_field, hdr_data, tot_len, hdr_len,
+                        txbuff->indir_arr + 1);
+}
+
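
  The "24"/"29" arithmetic above mirrors create_hdr_descs(): the first
  header descriptor carries up to 24 bytes of header data and each
  extension descriptor up to 29.  A self-contained sketch of the
  resulting descriptor count, assuming those limits:

	/* descriptors needed for tot_len bytes of header data */
	static int hdr_desc_count(int tot_len)
	{
		int rest = tot_len - 24;	/* first descriptor: 24 bytes */

		if (rest <= 0)
			return 1;
		return 1 + rest / 29 + (rest % 29 ? 1 : 0); /* 29 per ext. */
	}

  For example, 54 bytes of headers (14 L2 + 20 L3 + 20 L4) need three
  descriptors: 24 + 29 + 1 bytes.
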
 static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
 {
        struct ibmvnic_adapter *adapter = netdev_priv(netdev);
        int queue_num = skb_get_queue_mapping(skb);
+       u8 *hdrs = (u8 *)&adapter->tx_rx_desc_req;
        struct device *dev = &adapter->vdev->dev;
        struct ibmvnic_tx_buff *tx_buff = NULL;
        struct ibmvnic_tx_pool *tx_pool;
@@ -579,6 +714,7 @@ static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
        unsigned long lpar_rc;
        union sub_crq tx_crq;
        unsigned int offset;
+       int num_entries = 1;
        unsigned char *dst;
        u64 *handle_array;
        int index = 0;
@@ -644,11 +780,35 @@ static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
                        tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_UDP;
        }
 
-       if (skb->ip_summed == CHECKSUM_PARTIAL)
+       if (skb->ip_summed == CHECKSUM_PARTIAL) {
                tx_crq.v1.flags1 |= IBMVNIC_TX_CHKSUM_OFFLOAD;
-
-       lpar_rc = send_subcrq(adapter, handle_array[0], &tx_crq);
-
+               hdrs += 2;
+       }
+       /* determine if L2/L3/L4 headers are to be sent to firmware */
+       if ((*hdrs >> 7) & 1 &&
+           (skb->protocol == htons(ETH_P_IP) ||
+            skb->protocol == htons(ETH_P_IPV6))) {
+               build_hdr_descs_arr(tx_buff, &num_entries, *hdrs);
+               tx_crq.v1.n_crq_elem = num_entries;
+               tx_buff->indir_arr[0] = tx_crq;
+               tx_buff->indir_dma = dma_map_single(dev, tx_buff->indir_arr,
+                                                   sizeof(tx_buff->indir_arr),
+                                                   DMA_TO_DEVICE);
+               if (dma_mapping_error(dev, tx_buff->indir_dma)) {
+                       if (!firmware_has_feature(FW_FEATURE_CMO))
+                               dev_err(dev, "tx: unable to map descriptor array\n");
+                       tx_map_failed++;
+                       tx_dropped++;
+                       ret = NETDEV_TX_BUSY;
+                       goto out;
+               }
+               lpar_rc = send_subcrq_indirect(adapter, handle_array[queue_num],
+                                              (u64)tx_buff->indir_dma,
+                                              (u64)num_entries);
+       } else {
+               lpar_rc = send_subcrq(adapter, handle_array[queue_num],
+                                     &tx_crq);
+       }
        if (lpar_rc != H_SUCCESS) {
                dev_err(dev, "tx failed with code %ld\n", lpar_rc);
 
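
  This hunk is the heart of the TX change: when firmware has requested
  parsed headers (top bit of the tx_rx_desc_req byte), the TX descriptor
  and its header descriptors are DMA-mapped as one array and submitted
  with a single indirect hcall; plain frames keep the one-descriptor
  path.  A condensed sketch of that decision, where "fw_wants_headers"
  and "handle" stand in for the bit test and handle_array lookup above:

	if (fw_wants_headers) {
		ioba = dma_map_single(dev, tx_buff->indir_arr,
				      sizeof(tx_buff->indir_arr),
				      DMA_TO_DEVICE);
		if (dma_mapping_error(dev, ioba))
			return NETDEV_TX_BUSY;	/* requeued by the stack */
		rc = send_subcrq_indirect(adapter, handle, (u64)ioba,
					  (u64)num_entries);
	} else {
		rc = send_subcrq(adapter, handle, &tx_crq);
	}
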
@@ -832,7 +992,7 @@ restart_poll:
                netdev->stats.rx_bytes += length;
                frames_processed++;
        }
-       replenish_pools(adapter);
+       replenish_rx_pool(adapter, &adapter->rx_pool[scrq_num]);
 
        if (frames_processed < budget) {
                enable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]);
@@ -1054,12 +1214,6 @@ static struct ibmvnic_sub_crq_queue *init_sub_crq_queue(struct ibmvnic_adapter
                goto reg_failed;
        }
 
-       scrq->irq = irq_create_mapping(NULL, scrq->hw_irq);
-       if (scrq->irq == NO_IRQ) {
-               dev_err(dev, "Error mapping irq\n");
-               goto map_irq_failed;
-       }
-
        scrq->adapter = adapter;
        scrq->size = 4 * PAGE_SIZE / sizeof(*scrq->msgs);
        scrq->cur = 0;
@@ -1072,12 +1226,6 @@ static struct ibmvnic_sub_crq_queue *init_sub_crq_queue(struct ibmvnic_adapter
 
        return scrq;
 
-map_irq_failed:
-       do {
-               rc = plpar_hcall_norets(H_FREE_SUB_CRQ,
-                                       adapter->vdev->unit_address,
-                                       scrq->crq_num);
-       } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
 reg_failed:
        dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE,
                         DMA_BIDIRECTIONAL);
@@ -1098,6 +1246,7 @@ static void release_sub_crqs(struct ibmvnic_adapter *adapter)
                        if (adapter->tx_scrq[i]) {
                                free_irq(adapter->tx_scrq[i]->irq,
                                         adapter->tx_scrq[i]);
+                               irq_dispose_mapping(adapter->tx_scrq[i]->irq);
                                release_sub_crq_queue(adapter,
                                                      adapter->tx_scrq[i]);
                        }
@@ -1109,6 +1258,7 @@ static void release_sub_crqs(struct ibmvnic_adapter *adapter)
                        if (adapter->rx_scrq[i]) {
                                free_irq(adapter->rx_scrq[i]->irq,
                                         adapter->rx_scrq[i]);
+                               irq_dispose_mapping(adapter->rx_scrq[i]->irq);
                                release_sub_crq_queue(adapter,
                                                      adapter->rx_scrq[i]);
                        }
@@ -1118,6 +1268,29 @@ static void release_sub_crqs(struct ibmvnic_adapter *adapter)
        adapter->requested_caps = 0;
 }
 
+static void release_sub_crqs_no_irqs(struct ibmvnic_adapter *adapter)
+{
+       int i;
+
+       if (adapter->tx_scrq) {
+               for (i = 0; i < adapter->req_tx_queues; i++)
+                       if (adapter->tx_scrq[i])
+                               release_sub_crq_queue(adapter,
+                                                     adapter->tx_scrq[i]);
+               adapter->tx_scrq = NULL;
+       }
+
+       if (adapter->rx_scrq) {
+               for (i = 0; i < adapter->req_rx_queues; i++)
+                       if (adapter->rx_scrq[i])
+                               release_sub_crq_queue(adapter,
+                                                     adapter->rx_scrq[i]);
+               adapter->rx_scrq = NULL;
+       }
+
+       adapter->requested_caps = 0;
+}
+
 static int disable_scrq_irq(struct ibmvnic_adapter *adapter,
                            struct ibmvnic_sub_crq_queue *scrq)
 {
@@ -1159,6 +1332,7 @@ static int ibmvnic_complete_tx(struct ibmvnic_adapter *adapter,
        union sub_crq *next;
        int index;
        int i, j;
+       u8 first;
 
 restart_loop:
        while (pending_scrq(adapter, scrq)) {
@@ -1181,6 +1355,13 @@ restart_loop:
                                txbuff->data_dma[j] = 0;
                                txbuff->used_bounce = false;
                        }
+                       /* if sub_crq was sent indirectly */
+                       first = txbuff->indir_arr[0].generic.first;
+                       if (first == IBMVNIC_CRQ_CMD) {
+                               dma_unmap_single(dev, txbuff->indir_dma,
+                                                sizeof(txbuff->indir_arr),
+                                                DMA_TO_DEVICE);
+                       }
 
                        if (txbuff->last_frag)
                                dev_kfree_skb_any(txbuff->skb);
@@ -1229,6 +1410,66 @@ static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance)
        return IRQ_HANDLED;
 }
 
+static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter)
+{
+       struct device *dev = &adapter->vdev->dev;
+       struct ibmvnic_sub_crq_queue *scrq;
+       int i = 0, j = 0;
+       int rc = 0;
+
+       for (i = 0; i < adapter->req_tx_queues; i++) {
+               scrq = adapter->tx_scrq[i];
+               scrq->irq = irq_create_mapping(NULL, scrq->hw_irq);
+
+               if (scrq->irq == NO_IRQ) {
+                       rc = -EINVAL;
+                       dev_err(dev, "Error mapping irq\n");
+                       goto req_tx_irq_failed;
+               }
+
+               rc = request_irq(scrq->irq, ibmvnic_interrupt_tx,
+                                0, "ibmvnic_tx", scrq);
+
+               if (rc) {
+                       dev_err(dev, "Couldn't register tx irq 0x%x. rc=%d\n",
+                               scrq->irq, rc);
+                       irq_dispose_mapping(scrq->irq);
+                       goto req_tx_irq_failed;
+               }
+       }
+
+       for (i = 0; i < adapter->req_rx_queues; i++) {
+               scrq = adapter->rx_scrq[i];
+               scrq->irq = irq_create_mapping(NULL, scrq->hw_irq);
+               if (scrq->irq == NO_IRQ) {
+                       rc = -EINVAL;
+                       dev_err(dev, "Error mapping irq\n");
+                       goto req_rx_irq_failed;
+               }
+               rc = request_irq(scrq->irq, ibmvnic_interrupt_rx,
+                                0, "ibmvnic_rx", scrq);
+               if (rc) {
+                       dev_err(dev, "Couldn't register rx irq 0x%x. rc=%d\n",
+                               scrq->irq, rc);
+                       irq_dispose_mapping(scrq->irq);
+                       goto req_rx_irq_failed;
+               }
+       }
+       return rc;
+
+req_rx_irq_failed:
+       for (j = 0; j < i; j++) {
+               free_irq(adapter->rx_scrq[j]->irq, adapter->rx_scrq[j]);
+               irq_dispose_mapping(adapter->rx_scrq[j]->irq);
+       }
+       i = adapter->req_tx_queues;
+req_tx_irq_failed:
+       for (j = 0; j < i; j++) {
+               free_irq(adapter->tx_scrq[j]->irq, adapter->tx_scrq[j]);
+               irq_dispose_mapping(adapter->tx_scrq[j]->irq);
+       }
+       release_sub_crqs_no_irqs(adapter);
+       return rc;
+}
+
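
  With the brackets fixed in the unwind above, each queue's teardown now
  pairs free_irq() with irq_dispose_mapping(), undoing request_irq() and
  irq_create_mapping() in reverse order.  The per-queue pairing in
  isolation ("handler" is a placeholder):

	/* setup */
	scrq->irq = irq_create_mapping(NULL, scrq->hw_irq);
	rc = request_irq(scrq->irq, handler, 0, "ibmvnic_tx", scrq);

	/* teardown, reverse order */
	free_irq(scrq->irq, scrq);
	irq_dispose_mapping(scrq->irq);
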
 static void init_sub_crqs(struct ibmvnic_adapter *adapter, int retry)
 {
        struct device *dev = &adapter->vdev->dev;
@@ -1237,8 +1478,7 @@ static void init_sub_crqs(struct ibmvnic_adapter *adapter, int retry)
        union ibmvnic_crq crq;
        int total_queues;
        int more = 0;
-       int i, j;
-       int rc;
+       int i;
 
        if (!retry) {
                /* Sub-CRQ entries are 32 byte long */
@@ -1261,9 +1501,9 @@ static void init_sub_crqs(struct ibmvnic_adapter *adapter, int retry)
                    entries_page : adapter->max_rx_add_entries_per_subcrq;
 
                /* Choosing the maximum number of queues supported by firmware */
-               adapter->req_tx_queues = adapter->min_tx_queues;
-               adapter->req_rx_queues = adapter->min_rx_queues;
-               adapter->req_rx_add_queues = adapter->min_rx_add_queues;
+               adapter->req_tx_queues = adapter->max_tx_queues;
+               adapter->req_rx_queues = adapter->max_rx_queues;
+               adapter->req_rx_add_queues = adapter->max_rx_add_queues;
 
                adapter->req_mtu = adapter->max_mtu;
        }
@@ -1317,13 +1557,6 @@ static void init_sub_crqs(struct ibmvnic_adapter *adapter, int retry)
        for (i = 0; i < adapter->req_tx_queues; i++) {
                adapter->tx_scrq[i] = allqueues[i];
                adapter->tx_scrq[i]->pool_index = i;
-               rc = request_irq(adapter->tx_scrq[i]->irq, ibmvnic_interrupt_tx,
-                                0, "ibmvnic_tx", adapter->tx_scrq[i]);
-               if (rc) {
-                       dev_err(dev, "Couldn't register tx irq 0x%x. rc=%d\n",
-                               adapter->tx_scrq[i]->irq, rc);
-                       goto req_tx_irq_failed;
-               }
        }
 
        adapter->rx_scrq = kcalloc(adapter->req_rx_queues,
@@ -1334,13 +1567,6 @@ static void init_sub_crqs(struct ibmvnic_adapter *adapter, int retry)
        for (i = 0; i < adapter->req_rx_queues; i++) {
                adapter->rx_scrq[i] = allqueues[i + adapter->req_tx_queues];
                adapter->rx_scrq[i]->scrq_num = i;
-               rc = request_irq(adapter->rx_scrq[i]->irq, ibmvnic_interrupt_rx,
-                                0, "ibmvnic_rx", adapter->rx_scrq[i]);
-               if (rc) {
-                       dev_err(dev, "Couldn't register rx irq 0x%x. rc=%d\n",
-                               adapter->rx_scrq[i]->irq, rc);
-                       goto req_rx_irq_failed;
-               }
        }
 
        memset(&crq, 0, sizeof(crq));
@@ -1393,15 +1619,6 @@ static void init_sub_crqs(struct ibmvnic_adapter *adapter, int retry)
 
        return;
 
-req_rx_irq_failed:
-       for (j = 0; j < i; j++)
-               free_irq(adapter->rx_scrq[j]->irq, adapter->rx_scrq[j]);
-       i = adapter->req_tx_queues;
-req_tx_irq_failed:
-       for (j = 0; j < i; j++)
-               free_irq(adapter->tx_scrq[j]->irq, adapter->tx_scrq[j]);
-       kfree(adapter->rx_scrq);
-       adapter->rx_scrq = NULL;
 rx_failed:
        kfree(adapter->tx_scrq);
        adapter->tx_scrq = NULL;
@@ -1494,6 +1711,28 @@ static int send_subcrq(struct ibmvnic_adapter *adapter, u64 remote_handle,
        return rc;
 }
 
+static int send_subcrq_indirect(struct ibmvnic_adapter *adapter,
+                               u64 remote_handle, u64 ioba, u64 num_entries)
+{
+       unsigned int ua = adapter->vdev->unit_address;
+       struct device *dev = &adapter->vdev->dev;
+       int rc;
+
+       /* Make sure the hypervisor sees the complete request */
+       mb();
+       rc = plpar_hcall_norets(H_SEND_SUB_CRQ_INDIRECT, ua,
+                               cpu_to_be64(remote_handle),
+                               ioba, num_entries);
+
+       if (rc) {
+               if (rc == H_CLOSED)
+                       dev_warn(dev, "CRQ Queue closed\n");
+               dev_err(dev, "Send (indirect) error (rc=%d)\n", rc);
+       }
+
+       return rc;
+}
+
 static int ibmvnic_send_crq(struct ibmvnic_adapter *adapter,
                            union ibmvnic_crq *crq)
 {
@@ -1589,13 +1828,11 @@ static void send_login(struct ibmvnic_adapter *adapter)
                goto buf_map_failed;
        }
 
-       rsp_buffer_size =
-           sizeof(struct ibmvnic_login_rsp_buffer) +
-           sizeof(u64) * (adapter->req_tx_queues +
-                          adapter->req_rx_queues *
-                          adapter->req_rx_add_queues + adapter->
-                          req_rx_add_queues) +
-           sizeof(u8) * (IBMVNIC_TX_DESC_VERSIONS);
+       rsp_buffer_size = sizeof(struct ibmvnic_login_rsp_buffer) +
+                         sizeof(u64) * adapter->req_tx_queues +
+                         sizeof(u64) * adapter->req_rx_queues +
+                         sizeof(u64) * adapter->req_rx_queues +
+                         sizeof(u8) * IBMVNIC_TX_DESC_VERSIONS;
 
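
  A worked example of the cleaned-up size computation, with illustrative
  counts (req_tx_queues = 4, req_rx_queues = 4) and assuming the two rx
  arrays hold the rx and rx buffer-add sub-CRQ handles:

	rsp_buffer_size = sizeof(struct ibmvnic_login_rsp_buffer)
			  + 8 * 4	/* u64 handle per tx queue */
			  + 8 * 4	/* u64 handle per rx queue */
			  + 8 * 4	/* u64 handle per rx queue, again */
			  + 1 * IBMVNIC_TX_DESC_VERSIONS; /* u8 each */
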
        login_rsp_buffer = kmalloc(rsp_buffer_size, GFP_ATOMIC);
        if (!login_rsp_buffer)
@@ -1918,6 +2155,10 @@ static void handle_query_ip_offload_rsp(struct ibmvnic_adapter *adapter)
        if (buf->tcp_ipv6_chksum || buf->udp_ipv6_chksum)
                adapter->netdev->features |= NETIF_F_IPV6_CSUM;
 
+       if ((adapter->netdev->features &
+           (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)))
+               adapter->netdev->features |= NETIF_F_RXCSUM;
+
        memset(&crq, 0, sizeof(crq));
        crq.control_ip_offload.first = IBMVNIC_CRQ_CMD;
        crq.control_ip_offload.cmd = CONTROL_IP_OFFLOAD;
@@ -1931,7 +2172,7 @@ static void handle_error_info_rsp(union ibmvnic_crq *crq,
                                  struct ibmvnic_adapter *adapter)
 {
        struct device *dev = &adapter->vdev->dev;
-       struct ibmvnic_error_buff *error_buff;
+       struct ibmvnic_error_buff *error_buff, *tmp;
        unsigned long flags;
        bool found = false;
        int i;
@@ -1943,7 +2184,7 @@ static void handle_error_info_rsp(union ibmvnic_crq *crq,
        }
 
        spin_lock_irqsave(&adapter->error_list_lock, flags);
-       list_for_each_entry(error_buff, &adapter->errors, list)
+       list_for_each_entry_safe(error_buff, tmp, &adapter->errors, list)
                if (error_buff->error_id == crq->request_error_rsp.error_id) {
                        found = true;
                        list_del(&error_buff->list);
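
  The switch to list_for_each_entry_safe() here (and in
  ibmvnic_free_inflight() below) is needed because the loop body calls
  list_del() on the cursor; the _safe variant caches the next node
  before the body runs.  The general pattern ("should_remove" is a
  placeholder):

	struct ibmvnic_error_buff *pos, *n;

	list_for_each_entry_safe(pos, n, &adapter->errors, list) {
		if (should_remove(pos)) {
			list_del(&pos->list);
			kfree(pos);
		}
	}
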
@@ -2158,9 +2399,9 @@ static void handle_request_cap_rsp(union ibmvnic_crq *crq,
                         *req_value,
                         (long int)be32_to_cpu(crq->request_capability_rsp.
                                               number), name);
-               release_sub_crqs(adapter);
+               release_sub_crqs_no_irqs(adapter);
                *req_value = be32_to_cpu(crq->request_capability_rsp.number);
-               complete(&adapter->init_done);
+               init_sub_crqs(adapter, 1);
                return;
        default:
                dev_err(dev, "Error %d in request cap rsp\n",
@@ -2210,6 +2451,16 @@ static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq,
        dma_unmap_single(dev, adapter->login_rsp_buf_token,
                         adapter->login_rsp_buf_sz, DMA_BIDIRECTIONAL);
 
+       /* If the number of queues requested can't be allocated by the
+        * server, the login response will return with code 1. We will need
+        * to resend the login buffer with fewer queues requested.
+        */
+       if (login_rsp_crq->generic.rc.code) {
+               adapter->renegotiate = true;
+               complete(&adapter->init_done);
+               return 0;
+       }
+
        netdev_dbg(adapter->netdev, "Login Response Buffer:\n");
        for (i = 0; i < (adapter->login_rsp_buf_sz - 1) / 8 + 1; i++) {
                netdev_dbg(adapter->netdev, "%016lx\n",
@@ -2459,7 +2710,7 @@ static void handle_query_cap_rsp(union ibmvnic_crq *crq,
 
 out:
+       /* We're done querying the capabilities; initialize sub-crqs */
        if (atomic_read(&adapter->running_cap_queries) == 0)
-               complete(&adapter->init_done);
-               /* We're done querying the capabilities, initialize sub-crqs */
+               init_sub_crqs(adapter, 0);
 }
 
@@ -2941,14 +3192,14 @@ static void handle_request_ras_comp_num_rsp(union ibmvnic_crq *crq,
 
 static void ibmvnic_free_inflight(struct ibmvnic_adapter *adapter)
 {
-       struct ibmvnic_inflight_cmd *inflight_cmd;
+       struct ibmvnic_inflight_cmd *inflight_cmd, *tmp1;
        struct device *dev = &adapter->vdev->dev;
-       struct ibmvnic_error_buff *error_buff;
+       struct ibmvnic_error_buff *error_buff, *tmp2;
        unsigned long flags;
        unsigned long flags2;
 
        spin_lock_irqsave(&adapter->inflight_lock, flags);
-       list_for_each_entry(inflight_cmd, &adapter->inflight, list) {
+       list_for_each_entry_safe(inflight_cmd, tmp1, &adapter->inflight, list) {
                switch (inflight_cmd->crq.generic.cmd) {
                case LOGIN:
                        dma_unmap_single(dev, adapter->login_buf_token,
@@ -2965,8 +3216,8 @@ static void ibmvnic_free_inflight(struct ibmvnic_adapter *adapter)
                        break;
                case REQUEST_ERROR_INFO:
                        spin_lock_irqsave(&adapter->error_list_lock, flags2);
-                       list_for_each_entry(error_buff, &adapter->errors,
-                                           list) {
+                       list_for_each_entry_safe(error_buff, tmp2,
+                                                &adapter->errors, list) {
                                dma_unmap_single(dev, error_buff->dma,
                                                 error_buff->len,
                                                 DMA_FROM_DEVICE);
@@ -3357,6 +3608,7 @@ static const struct file_operations ibmvnic_dump_ops = {
 
 static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
 {
+       unsigned long timeout = msecs_to_jiffies(30000);
        struct ibmvnic_adapter *adapter;
        struct net_device *netdev;
        unsigned char *mac_addr_p;
@@ -3435,23 +3687,26 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
        ibmvnic_send_crq_init(adapter);
 
        init_completion(&adapter->init_done);
-       wait_for_completion(&adapter->init_done);
-
-       /* needed to pull init_sub_crqs outside of an interrupt context
-        * because it creates IRQ mappings for the subCRQ queues, causing
-        * a kernel warning
-        */
-       init_sub_crqs(adapter, 0);
-
-       reinit_completion(&adapter->init_done);
-       wait_for_completion(&adapter->init_done);
+       if (!wait_for_completion_timeout(&adapter->init_done, timeout))
+               return 0;
 
-       /* if init_sub_crqs is partially successful, retry */
-       while (!adapter->tx_scrq || !adapter->rx_scrq) {
-               init_sub_crqs(adapter, 1);
+       do {
+               if (adapter->renegotiate) {
+                       adapter->renegotiate = false;
+                       release_sub_crqs_no_irqs(adapter);
+                       send_cap_queries(adapter);
+
+                       reinit_completion(&adapter->init_done);
+                       if (!wait_for_completion_timeout(&adapter->init_done,
+                                                        timeout))
+                               return 0;
+               }
+       } while (adapter->renegotiate);
 
-               reinit_completion(&adapter->init_done);
-               wait_for_completion(&adapter->init_done);
+       rc = init_sub_crq_irqs(adapter);
+       if (rc) {
+               dev_err(&dev->dev, "failed to initialize sub crq irqs\n");
+               goto free_debugfs;
        }
 
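
  The reworked probe sequence above, in outline (timeouts and error
  handling elided):

	ibmvnic_send_crq_init(adapter);
	wait_for_completion_timeout(&adapter->init_done, timeout);

	while (adapter->renegotiate) {	/* server rejected our counts */
		adapter->renegotiate = false;
		release_sub_crqs_no_irqs(adapter);
		send_cap_queries(adapter);	/* re-runs init_sub_crqs() */
		reinit_completion(&adapter->init_done);
		wait_for_completion_timeout(&adapter->init_done, timeout);
	}

	init_sub_crq_irqs(adapter);	/* map/request IRQs once counts are final */
	register_netdev(netdev);
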
        netdev->real_num_tx_queues = adapter->req_tx_queues;
@@ -3459,12 +3714,14 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
        rc = register_netdev(netdev);
        if (rc) {
                dev_err(&dev->dev, "failed to register netdev rc=%d\n", rc);
-               goto free_debugfs;
+               goto free_sub_crqs;
        }
        dev_info(&dev->dev, "ibmvnic registered\n");
 
        return 0;
 
+free_sub_crqs:
+       release_sub_crqs(adapter);
 free_debugfs:
        if (adapter->debugfs_dir && !IS_ERR(adapter->debugfs_dir))
                debugfs_remove_recursive(adapter->debugfs_dir);