/**************************************************************************/
/*                                                                        */
/* IBM System i and System p Virtual NIC Device Driver                    */
/* Copyright (C) 2014 IBM Corp.                                           */
/* Santiago Leon (santi_leon@yahoo.com)                                   */
/* Thomas Falcon (tlfalcon@linux.vnet.ibm.com)                            */
/* John Allen (jallen@linux.vnet.ibm.com)                                 */
/*                                                                        */
/* This program is free software; you can redistribute it and/or modify   */
/* it under the terms of the GNU General Public License as published by   */
/* the Free Software Foundation; either version 2 of the License, or      */
/* (at your option) any later version.                                    */
/*                                                                        */
/* This program is distributed in the hope that it will be useful,        */
/* but WITHOUT ANY WARRANTY; without even the implied warranty of         */
/* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the          */
/* GNU General Public License for more details.                           */
/*                                                                        */
/* You should have received a copy of the GNU General Public License      */
/* along with this program.                                               */
/*                                                                        */
/* This module contains the implementation of a virtual ethernet device   */
/* for use with IBM i/p Series LPAR Linux. It utilizes the logical LAN    */
/* option of the RS/6000 Platform Architecture to interface with virtual  */
/* ethernet NICs that are presented to the partition by the hypervisor.   */
/*                                                                        */
/* Messages are passed between the VNIC driver and the VNIC server using  */
/* Command/Response Queues (CRQs) and sub CRQs (sCRQs). CRQs are used to  */
/* issue and receive commands that initiate communication with the server */
/* on driver initialization. Sub CRQs (sCRQs) are similar to CRQs, but    */
/* are used by the driver to notify the server that a packet is           */
/* ready for transmission or that a buffer has been added to receive a    */
/* packet. Subsequently, sCRQs are used by the server to notify the       */
/* driver that a packet transmission has been completed or that a packet  */
/* has been received and placed in a waiting buffer.                      */
/*                                                                        */
/* In lieu of a more conventional "on-the-fly" DMA mapping strategy in    */
/* which skbs are DMA mapped and immediately unmapped when the transmit   */
/* or receive has been completed, the VNIC driver is required to use      */
/* "long term mapping". This entails that large, contiguous DMA mapped    */
/* buffers are allocated on driver initialization and these buffers are   */
/* then continuously reused to pass skbs to and from the VNIC server.     */
/*                                                                        */
/**************************************************************************/

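/* Illustration of the long term mapping described above: each rx/tx pool
 * owns one ibmvnic_long_term_buff, and buffer slot "index" of a pool lives
 * at a fixed offset inside it:
 *
 *	offset   = index * pool->buff_size;
 *	dst      = pool->long_term_buff.buff + offset;	(CPU view)
 *	dma_addr = pool->long_term_buff.addr + offset;	(device view)
 *
 * skb data is copied into and out of this region (see replenish_rx_pool()
 * and ibmvnic_xmit() below) instead of being mapped and unmapped per packet.
 */
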
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/completion.h>
#include <linux/ioport.h>
#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/ethtool.h>
#include <linux/proc_fs.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/ipv6.h>
#include <linux/irq.h>
#include <linux/kthread.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/interrupt.h>
#include <net/net_namespace.h>
#include <asm/hvcall.h>
#include <linux/atomic.h>
#include <asm/vio.h>
#include <asm/iommu.h>
#include <linux/uaccess.h>
#include <asm/firmware.h>

#include "ibmvnic.h"

static const char ibmvnic_driver_name[] = "ibmvnic";
static const char ibmvnic_driver_string[] = "IBM System i/p Virtual NIC Driver";

MODULE_AUTHOR("Santiago Leon <santi_leon@yahoo.com>");
MODULE_DESCRIPTION("IBM System i/p Virtual NIC Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(IBMVNIC_DRIVER_VERSION);

static int ibmvnic_version = IBMVNIC_INITIAL_VERSION;
static int ibmvnic_remove(struct vio_dev *);
static void release_sub_crqs(struct ibmvnic_adapter *);
static int ibmvnic_reset_crq(struct ibmvnic_adapter *);
static int ibmvnic_send_crq_init(struct ibmvnic_adapter *);
static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *);
static int ibmvnic_send_crq(struct ibmvnic_adapter *, union ibmvnic_crq *);
static int send_subcrq(struct ibmvnic_adapter *adapter, u64 remote_handle,
		       union sub_crq *sub_crq);
static int send_subcrq_indirect(struct ibmvnic_adapter *, u64, u64, u64);
static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance);
static int enable_scrq_irq(struct ibmvnic_adapter *,
			   struct ibmvnic_sub_crq_queue *);
static int disable_scrq_irq(struct ibmvnic_adapter *,
			    struct ibmvnic_sub_crq_queue *);
static int pending_scrq(struct ibmvnic_adapter *,
			struct ibmvnic_sub_crq_queue *);
static union sub_crq *ibmvnic_next_scrq(struct ibmvnic_adapter *,
					struct ibmvnic_sub_crq_queue *);
static int ibmvnic_poll(struct napi_struct *napi, int budget);
static void send_map_query(struct ibmvnic_adapter *adapter);
static void send_request_map(struct ibmvnic_adapter *, dma_addr_t, u32, u8);
static void send_request_unmap(struct ibmvnic_adapter *, u8);

struct ibmvnic_stat {
	char name[ETH_GSTRING_LEN];
	int offset;
};

#define IBMVNIC_STAT_OFF(stat) (offsetof(struct ibmvnic_adapter, stats) + \
			     offsetof(struct ibmvnic_statistics, stat))
#define IBMVNIC_GET_STAT(a, off) (*((u64 *)(((unsigned long)(a)) + off)))

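/* Example: IBMVNIC_STAT_OFF(rx_packets) evaluates to the byte offset of
 * adapter->stats.rx_packets inside struct ibmvnic_adapter, and
 * IBMVNIC_GET_STAT(adapter, off) reads the u64 at that offset;
 * ibmvnic_get_ethtool_stats() uses the pair to walk the table below.
 */
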
static const struct ibmvnic_stat ibmvnic_stats[] = {
	{"rx_packets", IBMVNIC_STAT_OFF(rx_packets)},
	{"rx_bytes", IBMVNIC_STAT_OFF(rx_bytes)},
	{"tx_packets", IBMVNIC_STAT_OFF(tx_packets)},
	{"tx_bytes", IBMVNIC_STAT_OFF(tx_bytes)},
	{"ucast_tx_packets", IBMVNIC_STAT_OFF(ucast_tx_packets)},
	{"ucast_rx_packets", IBMVNIC_STAT_OFF(ucast_rx_packets)},
	{"mcast_tx_packets", IBMVNIC_STAT_OFF(mcast_tx_packets)},
	{"mcast_rx_packets", IBMVNIC_STAT_OFF(mcast_rx_packets)},
	{"bcast_tx_packets", IBMVNIC_STAT_OFF(bcast_tx_packets)},
	{"bcast_rx_packets", IBMVNIC_STAT_OFF(bcast_rx_packets)},
	{"align_errors", IBMVNIC_STAT_OFF(align_errors)},
	{"fcs_errors", IBMVNIC_STAT_OFF(fcs_errors)},
	{"single_collision_frames", IBMVNIC_STAT_OFF(single_collision_frames)},
	{"multi_collision_frames", IBMVNIC_STAT_OFF(multi_collision_frames)},
	{"sqe_test_errors", IBMVNIC_STAT_OFF(sqe_test_errors)},
	{"deferred_tx", IBMVNIC_STAT_OFF(deferred_tx)},
	{"late_collisions", IBMVNIC_STAT_OFF(late_collisions)},
	{"excess_collisions", IBMVNIC_STAT_OFF(excess_collisions)},
	{"internal_mac_tx_errors", IBMVNIC_STAT_OFF(internal_mac_tx_errors)},
	{"carrier_sense", IBMVNIC_STAT_OFF(carrier_sense)},
	{"too_long_frames", IBMVNIC_STAT_OFF(too_long_frames)},
	{"internal_mac_rx_errors", IBMVNIC_STAT_OFF(internal_mac_rx_errors)},
};

static long h_reg_sub_crq(unsigned long unit_address, unsigned long token,
			  unsigned long length, unsigned long *number,
			  unsigned long *irq)
{
	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
	long rc;

	rc = plpar_hcall(H_REG_SUB_CRQ, retbuf, unit_address, token, length);
	*number = retbuf[0];
	*irq = retbuf[1];

	return rc;
}

/* net_device_ops functions */

static void init_rx_pool(struct ibmvnic_adapter *adapter,
			 struct ibmvnic_rx_pool *rx_pool, int num, int index,
			 int buff_size, int active)
{
	netdev_dbg(adapter->netdev,
		   "Initializing rx_pool %d, %d buffs, %d bytes each\n",
		   index, num, buff_size);
	rx_pool->size = num;
	rx_pool->index = index;
	rx_pool->buff_size = buff_size;
	rx_pool->active = active;
}

static int alloc_long_term_buff(struct ibmvnic_adapter *adapter,
				struct ibmvnic_long_term_buff *ltb, int size)
{
	struct device *dev = &adapter->vdev->dev;

	ltb->size = size;
	ltb->buff = dma_alloc_coherent(dev, ltb->size, &ltb->addr,
				       GFP_KERNEL);
	if (!ltb->buff) {
		dev_err(dev, "Couldn't alloc long term buffer\n");
		return -ENOMEM;
	}
	ltb->map_id = adapter->map_id;
	adapter->map_id++;

	/* initialize the completion before sending the map request so the
	 * response cannot race with the wait below
	 */
	init_completion(&adapter->fw_done);
	send_request_map(adapter, ltb->addr,
			 ltb->size, ltb->map_id);
	wait_for_completion(&adapter->fw_done);
	return 0;
}

static void free_long_term_buff(struct ibmvnic_adapter *adapter,
				struct ibmvnic_long_term_buff *ltb)
{
	struct device *dev = &adapter->vdev->dev;

	dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
	send_request_unmap(adapter, ltb->map_id);
}

static int alloc_rx_pool(struct ibmvnic_adapter *adapter,
			 struct ibmvnic_rx_pool *pool)
{
	struct device *dev = &adapter->vdev->dev;
	int i;

	pool->free_map = kcalloc(pool->size, sizeof(int), GFP_KERNEL);
	if (!pool->free_map)
		return -ENOMEM;

	pool->rx_buff = kcalloc(pool->size, sizeof(struct ibmvnic_rx_buff),
				GFP_KERNEL);

	if (!pool->rx_buff) {
		dev_err(dev, "Couldn't alloc rx buffers\n");
		kfree(pool->free_map);
		return -ENOMEM;
	}

	if (alloc_long_term_buff(adapter, &pool->long_term_buff,
				 pool->size * pool->buff_size)) {
		kfree(pool->free_map);
		kfree(pool->rx_buff);
		return -ENOMEM;
	}

	for (i = 0; i < pool->size; ++i)
		pool->free_map[i] = i;

	atomic_set(&pool->available, 0);
	pool->next_alloc = 0;
	pool->next_free = 0;

	return 0;
}

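/* free_map acts as a ring of free buffer indices: replenish_rx_pool() pulls
 * indices from next_free and remove_buff_from_pool() returns them at
 * next_alloc, with IBMVNIC_INVALID_MAP marking slots currently owned by the
 * VNIC server.
 */
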
static void replenish_rx_pool(struct ibmvnic_adapter *adapter,
			      struct ibmvnic_rx_pool *pool)
{
	int count = pool->size - atomic_read(&pool->available);
	struct device *dev = &adapter->vdev->dev;
	int buffers_added = 0;
	unsigned long lpar_rc;
	union sub_crq sub_crq;
	struct sk_buff *skb;
	unsigned int offset;
	dma_addr_t dma_addr;
	unsigned char *dst;
	u64 *handle_array;
	int shift = 0;
	int index;
	int i;

	handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
			       be32_to_cpu(adapter->login_rsp_buf->
					   off_rxadd_subcrqs));

	for (i = 0; i < count; ++i) {
		skb = alloc_skb(pool->buff_size, GFP_ATOMIC);
		if (!skb) {
			dev_err(dev, "Couldn't replenish rx buff\n");
			adapter->replenish_no_mem++;
			break;
		}

		index = pool->free_map[pool->next_free];

		if (pool->rx_buff[index].skb)
			dev_err(dev, "Inconsistent free_map!\n");

		/* Copy the skb to the long term mapped DMA buffer */
		offset = index * pool->buff_size;
		dst = pool->long_term_buff.buff + offset;
		memset(dst, 0, pool->buff_size);
		dma_addr = pool->long_term_buff.addr + offset;
		pool->rx_buff[index].data = dst;

		pool->free_map[pool->next_free] = IBMVNIC_INVALID_MAP;
		pool->rx_buff[index].dma = dma_addr;
		pool->rx_buff[index].skb = skb;
		pool->rx_buff[index].pool_index = pool->index;
		pool->rx_buff[index].size = pool->buff_size;

		memset(&sub_crq, 0, sizeof(sub_crq));
		sub_crq.rx_add.first = IBMVNIC_CRQ_CMD;
		sub_crq.rx_add.correlator =
		    cpu_to_be64((u64)&pool->rx_buff[index]);
		sub_crq.rx_add.ioba = cpu_to_be32(dma_addr);
		sub_crq.rx_add.map_id = pool->long_term_buff.map_id;

		/* The length field of the sCRQ is defined to be 24 bits so the
		 * buffer size needs to be left shifted by a byte before it is
		 * converted to big endian to prevent the last byte from being
		 * truncated.
		 */
#ifdef __LITTLE_ENDIAN__
		shift = 8;
#endif
		sub_crq.rx_add.len = cpu_to_be32(pool->buff_size << shift);
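		/* e.g. a 4096 (0x1000) byte buffer becomes 0x00100000 after
		 * the shift on little endian; stored big endian the field
		 * bytes are 00 10 00 00, so the 24-bit length reads 0x001000
		 * (4096) and the low byte is not lost.
		 */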

		lpar_rc = send_subcrq(adapter, handle_array[pool->index],
				      &sub_crq);
		if (lpar_rc != H_SUCCESS)
			goto failure;

		buffers_added++;
		adapter->replenish_add_buff_success++;
		pool->next_free = (pool->next_free + 1) % pool->size;
	}
	atomic_add(buffers_added, &pool->available);
	return;

failure:
	dev_info(dev, "replenish pools failure\n");
	pool->free_map[pool->next_free] = index;
	pool->rx_buff[index].skb = NULL;
	if (!dma_mapping_error(dev, dma_addr))
		dma_unmap_single(dev, dma_addr, pool->buff_size,
				 DMA_FROM_DEVICE);

	dev_kfree_skb_any(skb);
	adapter->replenish_add_buff_failure++;
	atomic_add(buffers_added, &pool->available);
}

static void replenish_pools(struct ibmvnic_adapter *adapter)
{
	int i;

	if (adapter->migrated)
		return;

	adapter->replenish_task_cycles++;
	for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
	     i++) {
		if (adapter->rx_pool[i].active)
			replenish_rx_pool(adapter, &adapter->rx_pool[i]);
	}
}

static void free_rx_pool(struct ibmvnic_adapter *adapter,
			 struct ibmvnic_rx_pool *pool)
{
	int i;

	kfree(pool->free_map);
	pool->free_map = NULL;

	if (!pool->rx_buff)
		return;

	for (i = 0; i < pool->size; i++) {
		if (pool->rx_buff[i].skb) {
			dev_kfree_skb_any(pool->rx_buff[i].skb);
			pool->rx_buff[i].skb = NULL;
		}
	}
	kfree(pool->rx_buff);
	pool->rx_buff = NULL;
}

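/* Open path: allocate the napi contexts and the rx/tx pools backed by long
 * term buffers, map the tx bounce buffer, fill the rx sub-CRQs via
 * replenish_pools(), then enable the sub-CRQ interrupts and request a
 * logical link up.  Each failure label below unwinds the steps above it in
 * reverse order.
 */
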
static int ibmvnic_open(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_tx_pool *tx_pool;
	union ibmvnic_crq crq;
	int rxadd_subcrqs;
	u64 *size_array;
	int tx_subcrqs;
	int i, j;

	rxadd_subcrqs =
	    be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
	tx_subcrqs =
	    be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
	size_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
			     be32_to_cpu(adapter->login_rsp_buf->
					 off_rxadd_buff_size));
	adapter->map_id = 1;
	adapter->napi = kcalloc(adapter->req_rx_queues,
				sizeof(struct napi_struct), GFP_KERNEL);
	if (!adapter->napi)
		goto alloc_napi_failed;
	for (i = 0; i < adapter->req_rx_queues; i++) {
		netif_napi_add(netdev, &adapter->napi[i], ibmvnic_poll,
			       NAPI_POLL_WEIGHT);
		napi_enable(&adapter->napi[i]);
	}
	adapter->rx_pool =
	    kcalloc(rxadd_subcrqs, sizeof(struct ibmvnic_rx_pool), GFP_KERNEL);

	if (!adapter->rx_pool)
		goto rx_pool_arr_alloc_failed;
	send_map_query(adapter);
	for (i = 0; i < rxadd_subcrqs; i++) {
		init_rx_pool(adapter, &adapter->rx_pool[i],
			     IBMVNIC_BUFFS_PER_POOL, i,
			     be64_to_cpu(size_array[i]), 1);
		if (alloc_rx_pool(adapter, &adapter->rx_pool[i])) {
			dev_err(dev, "Couldn't alloc rx pool\n");
			goto rx_pool_alloc_failed;
		}
	}
	adapter->tx_pool =
	    kcalloc(tx_subcrqs, sizeof(struct ibmvnic_tx_pool), GFP_KERNEL);

	if (!adapter->tx_pool)
		goto tx_pool_arr_alloc_failed;
	for (i = 0; i < tx_subcrqs; i++) {
		tx_pool = &adapter->tx_pool[i];
		tx_pool->tx_buff =
		    kcalloc(adapter->max_tx_entries_per_subcrq,
			    sizeof(struct ibmvnic_tx_buff), GFP_KERNEL);
		if (!tx_pool->tx_buff)
			goto tx_pool_alloc_failed;

		if (alloc_long_term_buff(adapter, &tx_pool->long_term_buff,
					 adapter->max_tx_entries_per_subcrq *
					 adapter->req_mtu))
			goto tx_ltb_alloc_failed;

		tx_pool->free_map =
		    kcalloc(adapter->max_tx_entries_per_subcrq,
			    sizeof(int), GFP_KERNEL);
		if (!tx_pool->free_map)
			goto tx_fm_alloc_failed;

		for (j = 0; j < adapter->max_tx_entries_per_subcrq; j++)
			tx_pool->free_map[j] = j;

		tx_pool->consumer_index = 0;
		tx_pool->producer_index = 0;
	}
	adapter->bounce_buffer_size =
	    (netdev->mtu + ETH_HLEN - 1) / PAGE_SIZE + 1;
	adapter->bounce_buffer = kmalloc(adapter->bounce_buffer_size,
					 GFP_KERNEL);
	if (!adapter->bounce_buffer)
		goto bounce_alloc_failed;

	adapter->bounce_buffer_dma = dma_map_single(dev, adapter->bounce_buffer,
						    adapter->bounce_buffer_size,
						    DMA_TO_DEVICE);
	if (dma_mapping_error(dev, adapter->bounce_buffer_dma)) {
		dev_err(dev, "Couldn't map tx bounce buffer\n");
		goto bounce_map_failed;
	}
	replenish_pools(adapter);

	/* We're ready to receive frames, enable the sub-crq interrupts and
	 * set the logical link state to up
	 */
	for (i = 0; i < adapter->req_rx_queues; i++)
		enable_scrq_irq(adapter, adapter->rx_scrq[i]);

	for (i = 0; i < adapter->req_tx_queues; i++)
		enable_scrq_irq(adapter, adapter->tx_scrq[i]);

	memset(&crq, 0, sizeof(crq));
	crq.logical_link_state.first = IBMVNIC_CRQ_CMD;
	crq.logical_link_state.cmd = LOGICAL_LINK_STATE;
	crq.logical_link_state.link_state = IBMVNIC_LOGICAL_LNK_UP;
	ibmvnic_send_crq(adapter, &crq);

	netif_start_queue(netdev);
	return 0;

bounce_map_failed:
	kfree(adapter->bounce_buffer);
bounce_alloc_failed:
	i = tx_subcrqs - 1;
	kfree(adapter->tx_pool[i].free_map);
tx_fm_alloc_failed:
	free_long_term_buff(adapter, &adapter->tx_pool[i].long_term_buff);
tx_ltb_alloc_failed:
	kfree(adapter->tx_pool[i].tx_buff);
tx_pool_alloc_failed:
	for (j = 0; j < i; j++) {
		kfree(adapter->tx_pool[j].tx_buff);
		free_long_term_buff(adapter,
				    &adapter->tx_pool[j].long_term_buff);
		kfree(adapter->tx_pool[j].free_map);
	}
	kfree(adapter->tx_pool);
	adapter->tx_pool = NULL;
tx_pool_arr_alloc_failed:
	i = rxadd_subcrqs;
rx_pool_alloc_failed:
	for (j = 0; j < i; j++) {
		free_rx_pool(adapter, &adapter->rx_pool[j]);
		free_long_term_buff(adapter,
				    &adapter->rx_pool[j].long_term_buff);
	}
	kfree(adapter->rx_pool);
	adapter->rx_pool = NULL;
rx_pool_arr_alloc_failed:
	/* undo the napi_enable() calls above */
	for (i = 0; i < adapter->req_rx_queues; i++)
		napi_disable(&adapter->napi[i]);
alloc_napi_failed:
	return -ENOMEM;
}

static int ibmvnic_close(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->vdev->dev;
	union ibmvnic_crq crq;
	int i;

	adapter->closing = true;

	for (i = 0; i < adapter->req_rx_queues; i++)
		napi_disable(&adapter->napi[i]);

	netif_stop_queue(netdev);

	if (adapter->bounce_buffer) {
		if (!dma_mapping_error(dev, adapter->bounce_buffer_dma)) {
			dma_unmap_single(&adapter->vdev->dev,
					 adapter->bounce_buffer_dma,
					 adapter->bounce_buffer_size,
					 DMA_BIDIRECTIONAL);
			adapter->bounce_buffer_dma = DMA_ERROR_CODE;
		}
		kfree(adapter->bounce_buffer);
		adapter->bounce_buffer = NULL;
	}

	memset(&crq, 0, sizeof(crq));
	crq.logical_link_state.first = IBMVNIC_CRQ_CMD;
	crq.logical_link_state.cmd = LOGICAL_LINK_STATE;
	crq.logical_link_state.link_state = IBMVNIC_LOGICAL_LNK_DN;
	ibmvnic_send_crq(adapter, &crq);

	for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
	     i++) {
		kfree(adapter->tx_pool[i].tx_buff);
		free_long_term_buff(adapter,
				    &adapter->tx_pool[i].long_term_buff);
		kfree(adapter->tx_pool[i].free_map);
	}
	kfree(adapter->tx_pool);
	adapter->tx_pool = NULL;

	for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
	     i++) {
		free_rx_pool(adapter, &adapter->rx_pool[i]);
		free_long_term_buff(adapter,
				    &adapter->rx_pool[i].long_term_buff);
	}
	kfree(adapter->rx_pool);
	adapter->rx_pool = NULL;

	adapter->closing = false;

	return 0;
}

/**
 * build_hdr_data - creates L2/L3/L4 header data buffer
 * @hdr_field - bitfield determining needed headers
 * @skb - socket buffer
 * @hdr_len - array of header lengths to be filled in
 * @hdr_data - buffer to write the headers into
 *
 * Reads hdr_field to determine which headers are needed by firmware.
 * Builds a buffer containing these headers.  Saves individual header
 * lengths and total buffer length to be used to build descriptors.
 */
static int build_hdr_data(u8 hdr_field, struct sk_buff *skb,
			  int *hdr_len, u8 *hdr_data)
{
	int len = 0;
	u8 *hdr;

	hdr_len[0] = sizeof(struct ethhdr);

	if (skb->protocol == htons(ETH_P_IP)) {
		hdr_len[1] = ip_hdr(skb)->ihl * 4;
		if (ip_hdr(skb)->protocol == IPPROTO_TCP)
			hdr_len[2] = tcp_hdrlen(skb);
		else if (ip_hdr(skb)->protocol == IPPROTO_UDP)
			hdr_len[2] = sizeof(struct udphdr);
	} else if (skb->protocol == htons(ETH_P_IPV6)) {
		hdr_len[1] = sizeof(struct ipv6hdr);
		if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
			hdr_len[2] = tcp_hdrlen(skb);
		else if (ipv6_hdr(skb)->nexthdr == IPPROTO_UDP)
			hdr_len[2] = sizeof(struct udphdr);
	}

	memset(hdr_data, 0, 120);
	if ((hdr_field >> 6) & 1) {
		hdr = skb_mac_header(skb);
		memcpy(hdr_data, hdr, hdr_len[0]);
		len += hdr_len[0];
	}

	if ((hdr_field >> 5) & 1) {
		hdr = skb_network_header(skb);
		memcpy(hdr_data + len, hdr, hdr_len[1]);
		len += hdr_len[1];
	}

	if ((hdr_field >> 4) & 1) {
		hdr = skb_transport_header(skb);
		memcpy(hdr_data + len, hdr, hdr_len[2]);
		len += hdr_len[2];
	}
	return len;
}

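/* For a plain TCP/IPv4 skb with all three headers requested this typically
 * yields hdr_len = {14, 20, 20} (ethhdr, 20 byte IPv4 header, 20 byte TCP
 * header) and returns 54, the number of header bytes copied.
 */
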
/**
 * create_hdr_descs - create header and header extension descriptors
 * @hdr_field - bitfield determining needed headers
 * @hdr_data - buffer containing header data
 * @len - length of data buffer
 * @hdr_len - array of individual header lengths
 * @scrq_arr - descriptor array
 *
 * Creates header and, if needed, header extension descriptors and
 * places them in a descriptor array, scrq_arr
 */
static void create_hdr_descs(u8 hdr_field, u8 *hdr_data, int len, int *hdr_len,
			     union sub_crq *scrq_arr)
{
	union sub_crq hdr_desc;
	int tmp_len = len;
	u8 *data, *cur;
	int tmp;

	while (tmp_len > 0) {
		cur = hdr_data + len - tmp_len;

		memset(&hdr_desc, 0, sizeof(hdr_desc));
		if (cur != hdr_data) {
			data = hdr_desc.hdr_ext.data;
			tmp = tmp_len > 29 ? 29 : tmp_len;
			hdr_desc.hdr_ext.first = IBMVNIC_CRQ_CMD;
			hdr_desc.hdr_ext.type = IBMVNIC_HDR_EXT_DESC;
			hdr_desc.hdr_ext.len = tmp;
		} else {
			data = hdr_desc.hdr.data;
			tmp = tmp_len > 24 ? 24 : tmp_len;
			hdr_desc.hdr.first = IBMVNIC_CRQ_CMD;
			hdr_desc.hdr.type = IBMVNIC_HDR_DESC;
			hdr_desc.hdr.len = tmp;
			hdr_desc.hdr.l2_len = (u8)hdr_len[0];
			hdr_desc.hdr.l3_len = cpu_to_be16((u16)hdr_len[1]);
			hdr_desc.hdr.l4_len = (u8)hdr_len[2];
			hdr_desc.hdr.flag = hdr_field << 1;
		}
		memcpy(data, cur, tmp);
		tmp_len -= tmp;
		*scrq_arr = hdr_desc;
		scrq_arr++;
	}
}

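/* The first descriptor carries up to 24 bytes of header data and each
 * extension descriptor up to 29 more, so the 54 byte header block from the
 * example above is split 24 + 29 + 1 across one header descriptor and two
 * extension descriptors.
 */
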
/**
 * build_hdr_descs_arr - build a header descriptor array
 * @txbuff - tx buffer containing the socket buffer and descriptor array
 * @num_entries - number of descriptors to be sent
 * @hdr_field - bit field determining which headers will be sent
 *
 * This function will build a TX descriptor array with applicable
 * L2/L3/L4 packet header descriptors to be sent by send_subcrq_indirect.
 */
static void build_hdr_descs_arr(struct ibmvnic_tx_buff *txbuff,
				int *num_entries, u8 hdr_field)
{
	int hdr_len[3] = {0, 0, 0};
	int tot_len, len;
	u8 *hdr_data = txbuff->hdr_data;

	tot_len = build_hdr_data(hdr_field, txbuff->skb, hdr_len,
				 txbuff->hdr_data);
	len = tot_len;
	len -= 24;
	if (len > 0)
		*num_entries += len % 29 ? len / 29 + 1 : len / 29;
	create_hdr_descs(hdr_field, hdr_data, tot_len, hdr_len,
			 txbuff->indir_arr + 1);
}

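/* Continuing the example: tot_len = 54 leaves len = 30 after the 24 bytes
 * of the first descriptor, and 30 % 29 != 0 gives 30 / 29 + 1 = 2, so
 * *num_entries grows from 1 to 3 and ibmvnic_xmit() passes 3 to
 * send_subcrq_indirect() as the element count.
 */
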
static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int queue_num = skb_get_queue_mapping(skb);
	u8 *hdrs = (u8 *)&adapter->tx_rx_desc_req;
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_tx_buff *tx_buff = NULL;
	struct ibmvnic_tx_pool *tx_pool;
	unsigned int tx_send_failed = 0;
	unsigned int tx_map_failed = 0;
	unsigned int tx_dropped = 0;
	unsigned int tx_packets = 0;
	unsigned int tx_bytes = 0;
	dma_addr_t data_dma_addr;
	struct netdev_queue *txq;
	bool used_bounce = false;
	unsigned long lpar_rc;
	union sub_crq tx_crq;
	unsigned int offset;
	int num_entries = 1;
	unsigned char *dst;
	u64 *handle_array;
	int index = 0;
	int ret = 0;

	tx_pool = &adapter->tx_pool[queue_num];
	txq = netdev_get_tx_queue(netdev, skb_get_queue_mapping(skb));
	handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
			       be32_to_cpu(adapter->login_rsp_buf->
					   off_txsubm_subcrqs));
	if (adapter->migrated) {
		tx_send_failed++;
		tx_dropped++;
		ret = NETDEV_TX_BUSY;
		goto out;
	}

	index = tx_pool->free_map[tx_pool->consumer_index];
	offset = index * adapter->req_mtu;
	dst = tx_pool->long_term_buff.buff + offset;
	memset(dst, 0, adapter->req_mtu);
	skb_copy_from_linear_data(skb, dst, skb->len);
	data_dma_addr = tx_pool->long_term_buff.addr + offset;

	tx_pool->consumer_index =
	    (tx_pool->consumer_index + 1) %
		adapter->max_tx_entries_per_subcrq;

	tx_buff = &tx_pool->tx_buff[index];
	tx_buff->skb = skb;
	tx_buff->data_dma[0] = data_dma_addr;
	tx_buff->data_len[0] = skb->len;
	tx_buff->index = index;
	tx_buff->pool_index = queue_num;
	tx_buff->last_frag = true;
	tx_buff->used_bounce = used_bounce;

	memset(&tx_crq, 0, sizeof(tx_crq));
	tx_crq.v1.first = IBMVNIC_CRQ_CMD;
	tx_crq.v1.type = IBMVNIC_TX_DESC;
	tx_crq.v1.n_crq_elem = 1;
	tx_crq.v1.n_sge = 1;
	tx_crq.v1.flags1 = IBMVNIC_TX_COMP_NEEDED;
	tx_crq.v1.correlator = cpu_to_be32(index);
	tx_crq.v1.dma_reg = cpu_to_be16(tx_pool->long_term_buff.map_id);
	tx_crq.v1.sge_len = cpu_to_be32(skb->len);
	tx_crq.v1.ioba = cpu_to_be64(data_dma_addr);

	if (adapter->vlan_header_insertion) {
		tx_crq.v1.flags2 |= IBMVNIC_TX_VLAN_INSERT;
		tx_crq.v1.vlan_id = cpu_to_be16(skb->vlan_tci);
	}

	if (skb->protocol == htons(ETH_P_IP)) {
		if (ip_hdr(skb)->version == 4)
			tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV4;
		else if (ip_hdr(skb)->version == 6)
			tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV6;

		if (ip_hdr(skb)->protocol == IPPROTO_TCP)
			tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_TCP;
		else if (ip_hdr(skb)->protocol == IPPROTO_UDP)
			tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_UDP;
	}

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		tx_crq.v1.flags1 |= IBMVNIC_TX_CHKSUM_OFFLOAD;
		hdrs += 2;
	}
	/* determine if l2/3/4 headers are sent to firmware */
	if ((*hdrs >> 7) & 1 &&
	    (skb->protocol == htons(ETH_P_IP) ||
	     skb->protocol == htons(ETH_P_IPV6))) {
		build_hdr_descs_arr(tx_buff, &num_entries, *hdrs);
		tx_crq.v1.n_crq_elem = num_entries;
		tx_buff->indir_arr[0] = tx_crq;
		tx_buff->indir_dma = dma_map_single(dev, tx_buff->indir_arr,
						    sizeof(tx_buff->indir_arr),
						    DMA_TO_DEVICE);
		if (dma_mapping_error(dev, tx_buff->indir_dma)) {
			if (!firmware_has_feature(FW_FEATURE_CMO))
				dev_err(dev, "tx: unable to map descriptor array\n");
			tx_map_failed++;
			tx_dropped++;
			ret = NETDEV_TX_BUSY;
			goto out;
		}
		lpar_rc = send_subcrq_indirect(adapter, handle_array[0],
					       (u64)tx_buff->indir_dma,
					       (u64)num_entries);
	} else {
		lpar_rc = send_subcrq(adapter, handle_array[0], &tx_crq);
	}
	if (lpar_rc != H_SUCCESS) {
		dev_err(dev, "tx failed with code %ld\n", lpar_rc);

		if (tx_pool->consumer_index == 0)
			tx_pool->consumer_index =
				adapter->max_tx_entries_per_subcrq - 1;
		else
			tx_pool->consumer_index--;

		tx_send_failed++;
		tx_dropped++;
		ret = NETDEV_TX_BUSY;
		goto out;
	}
	tx_packets++;
	tx_bytes += skb->len;
	txq->trans_start = jiffies;
	ret = NETDEV_TX_OK;

out:
	netdev->stats.tx_dropped += tx_dropped;
	netdev->stats.tx_bytes += tx_bytes;
	netdev->stats.tx_packets += tx_packets;
	adapter->tx_send_failed += tx_send_failed;
	adapter->tx_map_failed += tx_map_failed;

	return ret;
}

static void ibmvnic_set_multi(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	struct netdev_hw_addr *ha;
	union ibmvnic_crq crq;

	memset(&crq, 0, sizeof(crq));
	crq.request_capability.first = IBMVNIC_CRQ_CMD;
	crq.request_capability.cmd = REQUEST_CAPABILITY;

	if (netdev->flags & IFF_PROMISC) {
		if (!adapter->promisc_supported)
			return;
	} else {
		if (netdev->flags & IFF_ALLMULTI) {
			/* Accept all multicast */
			memset(&crq, 0, sizeof(crq));
			crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
			crq.multicast_ctrl.cmd = MULTICAST_CTRL;
			crq.multicast_ctrl.flags = IBMVNIC_ENABLE_ALL;
			ibmvnic_send_crq(adapter, &crq);
		} else if (netdev_mc_empty(netdev)) {
			/* Reject all multicast */
			memset(&crq, 0, sizeof(crq));
			crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
			crq.multicast_ctrl.cmd = MULTICAST_CTRL;
			crq.multicast_ctrl.flags = IBMVNIC_DISABLE_ALL;
			ibmvnic_send_crq(adapter, &crq);
		} else {
			/* Accept one or more multicast(s) */
			netdev_for_each_mc_addr(ha, netdev) {
				memset(&crq, 0, sizeof(crq));
				crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
				crq.multicast_ctrl.cmd = MULTICAST_CTRL;
				crq.multicast_ctrl.flags = IBMVNIC_ENABLE_MC;
				ether_addr_copy(&crq.multicast_ctrl.mac_addr[0],
						ha->addr);
				ibmvnic_send_crq(adapter, &crq);
			}
		}
	}
}

static int ibmvnic_set_mac(struct net_device *netdev, void *p)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;
	union ibmvnic_crq crq;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memset(&crq, 0, sizeof(crq));
	crq.change_mac_addr.first = IBMVNIC_CRQ_CMD;
	crq.change_mac_addr.cmd = CHANGE_MAC_ADDR;
	ether_addr_copy(&crq.change_mac_addr.mac_addr[0], addr->sa_data);
	ibmvnic_send_crq(adapter, &crq);
	/* netdev->dev_addr is changed in handle_change_mac_rsp function */
	return 0;
}

static int ibmvnic_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	if (new_mtu > adapter->req_mtu || new_mtu < adapter->min_mtu)
		return -EINVAL;

	netdev->mtu = new_mtu;
	return 0;
}

static void ibmvnic_tx_timeout(struct net_device *dev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(dev);
	int rc;

	/* Adapter timed out, resetting it */
	release_sub_crqs(adapter);
	rc = ibmvnic_reset_crq(adapter);
	if (rc)
		dev_err(&adapter->vdev->dev, "Adapter timeout, reset failed\n");
	else
		ibmvnic_send_crq_init(adapter);
}

static void remove_buff_from_pool(struct ibmvnic_adapter *adapter,
				  struct ibmvnic_rx_buff *rx_buff)
{
	struct ibmvnic_rx_pool *pool = &adapter->rx_pool[rx_buff->pool_index];

	rx_buff->skb = NULL;

	pool->free_map[pool->next_alloc] = (int)(rx_buff - pool->rx_buff);
	pool->next_alloc = (pool->next_alloc + 1) % pool->size;

	atomic_dec(&pool->available);
}

static int ibmvnic_poll(struct napi_struct *napi, int budget)
{
	struct net_device *netdev = napi->dev;
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int scrq_num = (int)(napi - adapter->napi);
	int frames_processed = 0;
restart_poll:
	while (frames_processed < budget) {
		struct sk_buff *skb;
		struct ibmvnic_rx_buff *rx_buff;
		union sub_crq *next;
		u32 length;
		u16 offset;
		u8 flags = 0;

		if (!pending_scrq(adapter, adapter->rx_scrq[scrq_num]))
			break;
		next = ibmvnic_next_scrq(adapter, adapter->rx_scrq[scrq_num]);
		rx_buff =
		    (struct ibmvnic_rx_buff *)be64_to_cpu(next->
							  rx_comp.correlator);
		/* do error checking */
		if (next->rx_comp.rc) {
			netdev_err(netdev, "rx error %x\n", next->rx_comp.rc);
			/* free the entry */
			next->rx_comp.first = 0;
			remove_buff_from_pool(adapter, rx_buff);
			break;
		}

		length = be32_to_cpu(next->rx_comp.len);
		offset = be16_to_cpu(next->rx_comp.off_frame_data);
		flags = next->rx_comp.flags;
		skb = rx_buff->skb;
		skb_copy_to_linear_data(skb, rx_buff->data + offset,
					length);
		skb->vlan_tci = be16_to_cpu(next->rx_comp.vlan_tci);
		/* free the entry */
		next->rx_comp.first = 0;
		remove_buff_from_pool(adapter, rx_buff);

		skb_put(skb, length);
		skb->protocol = eth_type_trans(skb, netdev);

		if (flags & IBMVNIC_IP_CHKSUM_GOOD &&
		    flags & IBMVNIC_TCP_UDP_CHKSUM_GOOD) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		}

		length = skb->len;
		napi_gro_receive(napi, skb); /* send it up */
		netdev->stats.rx_packets++;
		netdev->stats.rx_bytes += length;
		frames_processed++;
	}
	replenish_pools(adapter);
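
	/* If the budget was not exhausted, re-arm the interrupt and complete
	 * napi, then re-check for completions that raced with the re-enable;
	 * if one slipped in, mask the interrupt again and keep polling.
	 */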
	if (frames_processed < budget) {
		enable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]);
		napi_complete(napi);
		if (pending_scrq(adapter, adapter->rx_scrq[scrq_num]) &&
		    napi_reschedule(napi)) {
			disable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]);
			goto restart_poll;
		}
	}
	return frames_processed;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void ibmvnic_netpoll_controller(struct net_device *dev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(dev);
	int i;

	replenish_pools(netdev_priv(dev));
	for (i = 0; i < adapter->req_rx_queues; i++)
		ibmvnic_interrupt_rx(adapter->rx_scrq[i]->irq,
				     adapter->rx_scrq[i]);
}
#endif

static const struct net_device_ops ibmvnic_netdev_ops = {
	.ndo_open = ibmvnic_open,
	.ndo_stop = ibmvnic_close,
	.ndo_start_xmit = ibmvnic_xmit,
	.ndo_set_rx_mode = ibmvnic_set_multi,
	.ndo_set_mac_address = ibmvnic_set_mac,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_change_mtu = ibmvnic_change_mtu,
	.ndo_tx_timeout = ibmvnic_tx_timeout,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = ibmvnic_netpoll_controller,
#endif
};

/* ethtool functions */

static int ibmvnic_get_settings(struct net_device *netdev,
				struct ethtool_cmd *cmd)
{
	cmd->supported = (SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg |
			  SUPPORTED_FIBRE);
	cmd->advertising = (ADVERTISED_1000baseT_Full | ADVERTISED_Autoneg |
			    ADVERTISED_FIBRE);
	ethtool_cmd_speed_set(cmd, SPEED_1000);
	cmd->duplex = DUPLEX_FULL;
	cmd->port = PORT_FIBRE;
	cmd->phy_address = 0;
	cmd->transceiver = XCVR_INTERNAL;
	cmd->autoneg = AUTONEG_ENABLE;
	cmd->maxtxpkt = 0;
	cmd->maxrxpkt = 0;
	return 0;
}

static void ibmvnic_get_drvinfo(struct net_device *dev,
				struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, ibmvnic_driver_name, sizeof(info->driver));
	strlcpy(info->version, IBMVNIC_DRIVER_VERSION, sizeof(info->version));
}

static u32 ibmvnic_get_msglevel(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	return adapter->msg_enable;
}

static void ibmvnic_set_msglevel(struct net_device *netdev, u32 data)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	adapter->msg_enable = data;
}

static u32 ibmvnic_get_link(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	/* Don't need to send a query because we request a logical link up at
	 * init and then we wait for link state indications
	 */
	return adapter->logical_link_state;
}

static void ibmvnic_get_ringparam(struct net_device *netdev,
				  struct ethtool_ringparam *ring)
{
	ring->rx_max_pending = 0;
	ring->tx_max_pending = 0;
	ring->rx_mini_max_pending = 0;
	ring->rx_jumbo_max_pending = 0;
	ring->rx_pending = 0;
	ring->tx_pending = 0;
	ring->rx_mini_pending = 0;
	ring->rx_jumbo_pending = 0;
}

static void ibmvnic_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	int i;

	if (stringset != ETH_SS_STATS)
		return;

	for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); i++, data += ETH_GSTRING_LEN)
		memcpy(data, ibmvnic_stats[i].name, ETH_GSTRING_LEN);
}

static int ibmvnic_get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return ARRAY_SIZE(ibmvnic_stats);
	default:
		return -EOPNOTSUPP;
	}
}

static void ibmvnic_get_ethtool_stats(struct net_device *dev,
				      struct ethtool_stats *stats, u64 *data)
{
	struct ibmvnic_adapter *adapter = netdev_priv(dev);
	union ibmvnic_crq crq;
	int i;

	memset(&crq, 0, sizeof(crq));
	crq.request_statistics.first = IBMVNIC_CRQ_CMD;
	crq.request_statistics.cmd = REQUEST_STATISTICS;
	crq.request_statistics.ioba = cpu_to_be32(adapter->stats_token);
	crq.request_statistics.len =
	    cpu_to_be32(sizeof(struct ibmvnic_statistics));

	/* Wait for data to be written; initialize the completion before
	 * sending the request so the response cannot race with the wait
	 */
	init_completion(&adapter->stats_done);
	ibmvnic_send_crq(adapter, &crq);
	wait_for_completion(&adapter->stats_done);

	for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); i++)
		data[i] = IBMVNIC_GET_STAT(adapter, ibmvnic_stats[i].offset);
}

static const struct ethtool_ops ibmvnic_ethtool_ops = {
	.get_settings = ibmvnic_get_settings,
	.get_drvinfo = ibmvnic_get_drvinfo,
	.get_msglevel = ibmvnic_get_msglevel,
	.set_msglevel = ibmvnic_set_msglevel,
	.get_link = ibmvnic_get_link,
	.get_ringparam = ibmvnic_get_ringparam,
	.get_strings = ibmvnic_get_strings,
	.get_sset_count = ibmvnic_get_sset_count,
	.get_ethtool_stats = ibmvnic_get_ethtool_stats,
};

/* Routines for managing CRQs/sCRQs */

static void release_sub_crq_queue(struct ibmvnic_adapter *adapter,
				  struct ibmvnic_sub_crq_queue *scrq)
{
	struct device *dev = &adapter->vdev->dev;
	long rc;

	netdev_dbg(adapter->netdev, "Releasing sub-CRQ\n");

	/* Close the sub-crqs */
	do {
		rc = plpar_hcall_norets(H_FREE_SUB_CRQ,
					adapter->vdev->unit_address,
					scrq->crq_num);
	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));

	dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE,
			 DMA_BIDIRECTIONAL);
	free_pages((unsigned long)scrq->msgs, 2);
	kfree(scrq);
}

static struct ibmvnic_sub_crq_queue *init_sub_crq_queue(struct ibmvnic_adapter
							*adapter)
{
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_sub_crq_queue *scrq;
	int rc;

	scrq = kmalloc(sizeof(*scrq), GFP_ATOMIC);
	if (!scrq)
		return NULL;

	scrq->msgs = (union sub_crq *)__get_free_pages(GFP_KERNEL, 2);
	if (!scrq->msgs) {
		dev_warn(dev, "Couldn't allocate crq queue messages page\n");
		goto zero_page_failed;
	}
	memset(scrq->msgs, 0, 4 * PAGE_SIZE);

	scrq->msg_token = dma_map_single(dev, scrq->msgs, 4 * PAGE_SIZE,
					 DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, scrq->msg_token)) {
		dev_warn(dev, "Couldn't map crq queue messages page\n");
		goto map_failed;
	}

	rc = h_reg_sub_crq(adapter->vdev->unit_address, scrq->msg_token,
			   4 * PAGE_SIZE, &scrq->crq_num, &scrq->hw_irq);

	if (rc == H_RESOURCE)
		rc = ibmvnic_reset_crq(adapter);

	if (rc == H_CLOSED) {
		dev_warn(dev, "Partner adapter not ready, waiting.\n");
	} else if (rc) {
		dev_warn(dev, "Error %d registering sub-crq\n", rc);
		goto reg_failed;
	}

	scrq->irq = irq_create_mapping(NULL, scrq->hw_irq);
	if (scrq->irq == NO_IRQ) {
		dev_err(dev, "Error mapping irq\n");
		goto map_irq_failed;
	}

	scrq->adapter = adapter;
	scrq->size = 4 * PAGE_SIZE / sizeof(*scrq->msgs);
	scrq->cur = 0;
	scrq->rx_skb_top = NULL;
	spin_lock_init(&scrq->lock);

	netdev_dbg(adapter->netdev,
		   "sub-crq initialized, num %lx, hw_irq=%lx, irq=%x\n",
		   scrq->crq_num, scrq->hw_irq, scrq->irq);

	return scrq;

map_irq_failed:
	do {
		rc = plpar_hcall_norets(H_FREE_SUB_CRQ,
					adapter->vdev->unit_address,
					scrq->crq_num);
	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
reg_failed:
	dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE,
			 DMA_BIDIRECTIONAL);
map_failed:
	free_pages((unsigned long)scrq->msgs, 2);
zero_page_failed:
	kfree(scrq);

	return NULL;
}

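/* Each sub-CRQ ring registered above spans 4 pages of 32 byte entries,
 * i.e. 4 * PAGE_SIZE / 32 slots (512 entries with 4 KB pages).
 */
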
static void release_sub_crqs(struct ibmvnic_adapter *adapter)
{
	int i;

	if (adapter->tx_scrq) {
		for (i = 0; i < adapter->req_tx_queues; i++)
			if (adapter->tx_scrq[i]) {
				free_irq(adapter->tx_scrq[i]->irq,
					 adapter->tx_scrq[i]);
				release_sub_crq_queue(adapter,
						      adapter->tx_scrq[i]);
			}
		adapter->tx_scrq = NULL;
	}

	if (adapter->rx_scrq) {
		for (i = 0; i < adapter->req_rx_queues; i++)
			if (adapter->rx_scrq[i]) {
				free_irq(adapter->rx_scrq[i]->irq,
					 adapter->rx_scrq[i]);
				release_sub_crq_queue(adapter,
						      adapter->rx_scrq[i]);
			}
		adapter->rx_scrq = NULL;
	}

	adapter->requested_caps = 0;
}

static int disable_scrq_irq(struct ibmvnic_adapter *adapter,
			    struct ibmvnic_sub_crq_queue *scrq)
{
	struct device *dev = &adapter->vdev->dev;
	unsigned long rc;

	rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
				H_DISABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0);
	if (rc)
		dev_err(dev, "Couldn't disable scrq irq 0x%lx. rc=%ld\n",
			scrq->hw_irq, rc);
	return rc;
}

static int enable_scrq_irq(struct ibmvnic_adapter *adapter,
			   struct ibmvnic_sub_crq_queue *scrq)
{
	struct device *dev = &adapter->vdev->dev;
	unsigned long rc;

	if (scrq->hw_irq > 0x100000000ULL) {
		dev_err(dev, "bad hw_irq = %lx\n", scrq->hw_irq);
		return 1;
	}

	rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
				H_ENABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0);
	if (rc)
		dev_err(dev, "Couldn't enable scrq irq 0x%lx. rc=%ld\n",
			scrq->hw_irq, rc);
	return rc;
}

static int ibmvnic_complete_tx(struct ibmvnic_adapter *adapter,
			       struct ibmvnic_sub_crq_queue *scrq)
{
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_tx_buff *txbuff;
	union sub_crq *next;
	int index;
	int i, j;
	u8 first;

restart_loop:
	while (pending_scrq(adapter, scrq)) {
		unsigned int pool = scrq->pool_index;

		next = ibmvnic_next_scrq(adapter, scrq);
		for (i = 0; i < next->tx_comp.num_comps; i++) {
			if (next->tx_comp.rcs[i]) {
				dev_err(dev, "tx error %x\n",
					next->tx_comp.rcs[i]);
				continue;
			}
			index = be32_to_cpu(next->tx_comp.correlators[i]);
			txbuff = &adapter->tx_pool[pool].tx_buff[index];

			for (j = 0; j < IBMVNIC_MAX_FRAGS_PER_CRQ; j++) {
				if (!txbuff->data_dma[j])
					continue;

				txbuff->data_dma[j] = 0;
				txbuff->used_bounce = false;
			}
			/* if sub_crq was sent indirectly */
			first = txbuff->indir_arr[0].generic.first;
			if (first == IBMVNIC_CRQ_CMD) {
				dma_unmap_single(dev, txbuff->indir_dma,
						 sizeof(txbuff->indir_arr),
						 DMA_TO_DEVICE);
			}

			if (txbuff->last_frag)
				dev_kfree_skb_any(txbuff->skb);

			adapter->tx_pool[pool].free_map[adapter->tx_pool[pool].
						     producer_index] = index;
			adapter->tx_pool[pool].producer_index =
			    (adapter->tx_pool[pool].producer_index + 1) %
			    adapter->max_tx_entries_per_subcrq;
		}
		/* remove tx_comp scrq*/
		next->tx_comp.first = 0;
	}

	enable_scrq_irq(adapter, scrq);

	if (pending_scrq(adapter, scrq)) {
		disable_scrq_irq(adapter, scrq);
		goto restart_loop;
	}

	return 0;
}

static irqreturn_t ibmvnic_interrupt_tx(int irq, void *instance)
{
	struct ibmvnic_sub_crq_queue *scrq = instance;
	struct ibmvnic_adapter *adapter = scrq->adapter;

	disable_scrq_irq(adapter, scrq);
	ibmvnic_complete_tx(adapter, scrq);

	return IRQ_HANDLED;
}

static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance)
{
	struct ibmvnic_sub_crq_queue *scrq = instance;
	struct ibmvnic_adapter *adapter = scrq->adapter;

	if (napi_schedule_prep(&adapter->napi[scrq->scrq_num])) {
		disable_scrq_irq(adapter, scrq);
		__napi_schedule(&adapter->napi[scrq->scrq_num]);
	}

	return IRQ_HANDLED;
}

static void init_sub_crqs(struct ibmvnic_adapter *adapter, int retry)
{
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_sub_crq_queue **allqueues;
	int registered_queues = 0;
	union ibmvnic_crq crq;
	int total_queues;
	int more = 0;
	int i, j;
	int rc;

	if (!retry) {
		/* Sub-CRQ entries are 32 byte long */
		int entries_page = 4 * PAGE_SIZE / (sizeof(u64) * 4);

		if (adapter->min_tx_entries_per_subcrq > entries_page ||
		    adapter->min_rx_add_entries_per_subcrq > entries_page) {
			dev_err(dev, "Fatal, invalid entries per sub-crq\n");
			goto allqueues_failed;
		}

		/* Get the minimum between the queried max and the entries
		 * that fit in our PAGE_SIZE
		 */
		adapter->req_tx_entries_per_subcrq =
		    adapter->max_tx_entries_per_subcrq > entries_page ?
		    entries_page : adapter->max_tx_entries_per_subcrq;
		adapter->req_rx_add_entries_per_subcrq =
		    adapter->max_rx_add_entries_per_subcrq > entries_page ?
		    entries_page : adapter->max_rx_add_entries_per_subcrq;

		/* Start from the minimum number of queues supported by
		 * firmware
		 */
		adapter->req_tx_queues = adapter->min_tx_queues;
		adapter->req_rx_queues = adapter->min_rx_queues;
		adapter->req_rx_add_queues = adapter->min_rx_add_queues;

		adapter->req_mtu = adapter->max_mtu;
	}

	total_queues = adapter->req_tx_queues + adapter->req_rx_queues;

	allqueues = kcalloc(total_queues, sizeof(*allqueues), GFP_ATOMIC);
	if (!allqueues)
		goto allqueues_failed;

	for (i = 0; i < total_queues; i++) {
		allqueues[i] = init_sub_crq_queue(adapter);
		if (!allqueues[i]) {
			dev_warn(dev, "Couldn't allocate all sub-crqs\n");
			break;
		}
		registered_queues++;
	}

	/* Make sure we were able to register the minimum number of queues */
	if (registered_queues <
	    adapter->min_tx_queues + adapter->min_rx_queues) {
		dev_err(dev, "Fatal: Couldn't init min number of sub-crqs\n");
		goto tx_failed;
	}

	/* Distribute the failed allocated queues*/
	for (i = 0; i < total_queues - registered_queues + more; i++) {
		netdev_dbg(adapter->netdev, "Reducing number of queues\n");
		switch (i % 3) {
		case 0:
			if (adapter->req_rx_queues > adapter->min_rx_queues)
				adapter->req_rx_queues--;
			else
				more++;
			break;
		case 1:
			if (adapter->req_tx_queues > adapter->min_tx_queues)
				adapter->req_tx_queues--;
			else
				more++;
			break;
		}
	}

	adapter->tx_scrq = kcalloc(adapter->req_tx_queues,
				   sizeof(*adapter->tx_scrq), GFP_ATOMIC);
	if (!adapter->tx_scrq)
		goto tx_failed;

	for (i = 0; i < adapter->req_tx_queues; i++) {
		adapter->tx_scrq[i] = allqueues[i];
		adapter->tx_scrq[i]->pool_index = i;
		rc = request_irq(adapter->tx_scrq[i]->irq, ibmvnic_interrupt_tx,
				 0, "ibmvnic_tx", adapter->tx_scrq[i]);
		if (rc) {
			dev_err(dev, "Couldn't register tx irq 0x%x. rc=%d\n",
				adapter->tx_scrq[i]->irq, rc);
			goto req_tx_irq_failed;
		}
	}

	adapter->rx_scrq = kcalloc(adapter->req_rx_queues,
				   sizeof(*adapter->rx_scrq), GFP_ATOMIC);
	if (!adapter->rx_scrq)
		goto rx_failed;

	for (i = 0; i < adapter->req_rx_queues; i++) {
		adapter->rx_scrq[i] = allqueues[i + adapter->req_tx_queues];
		adapter->rx_scrq[i]->scrq_num = i;
		rc = request_irq(adapter->rx_scrq[i]->irq, ibmvnic_interrupt_rx,
				 0, "ibmvnic_rx", adapter->rx_scrq[i]);
		if (rc) {
			dev_err(dev, "Couldn't register rx irq 0x%x. rc=%d\n",
				adapter->rx_scrq[i]->irq, rc);
			goto req_rx_irq_failed;
		}
	}

	memset(&crq, 0, sizeof(crq));
	crq.request_capability.first = IBMVNIC_CRQ_CMD;
	crq.request_capability.cmd = REQUEST_CAPABILITY;

	crq.request_capability.capability = cpu_to_be16(REQ_TX_QUEUES);
	crq.request_capability.number = cpu_to_be64(adapter->req_tx_queues);
	ibmvnic_send_crq(adapter, &crq);

	crq.request_capability.capability = cpu_to_be16(REQ_RX_QUEUES);
	crq.request_capability.number = cpu_to_be64(adapter->req_rx_queues);
	ibmvnic_send_crq(adapter, &crq);

	crq.request_capability.capability = cpu_to_be16(REQ_RX_ADD_QUEUES);
	crq.request_capability.number = cpu_to_be64(adapter->req_rx_add_queues);
	ibmvnic_send_crq(adapter, &crq);

	crq.request_capability.capability =
	    cpu_to_be16(REQ_TX_ENTRIES_PER_SUBCRQ);
	crq.request_capability.number =
	    cpu_to_be64(adapter->req_tx_entries_per_subcrq);
	ibmvnic_send_crq(adapter, &crq);

	crq.request_capability.capability =
	    cpu_to_be16(REQ_RX_ADD_ENTRIES_PER_SUBCRQ);
	crq.request_capability.number =
	    cpu_to_be64(adapter->req_rx_add_entries_per_subcrq);
	ibmvnic_send_crq(adapter, &crq);

	crq.request_capability.capability = cpu_to_be16(REQ_MTU);
	crq.request_capability.number = cpu_to_be64(adapter->req_mtu);
	ibmvnic_send_crq(adapter, &crq);

	if (adapter->netdev->flags & IFF_PROMISC) {
		if (adapter->promisc_supported) {
			crq.request_capability.capability =
			    cpu_to_be16(PROMISC_REQUESTED);
			crq.request_capability.number = cpu_to_be64(1);
			ibmvnic_send_crq(adapter, &crq);
		}
	} else {
		crq.request_capability.capability =
		    cpu_to_be16(PROMISC_REQUESTED);
		crq.request_capability.number = cpu_to_be64(0);
		ibmvnic_send_crq(adapter, &crq);
	}

	kfree(allqueues);

	return;

req_rx_irq_failed:
	for (j = 0; j < i; j++)
		free_irq(adapter->rx_scrq[j]->irq, adapter->rx_scrq[j]);
	i = adapter->req_tx_queues;
req_tx_irq_failed:
	for (j = 0; j < i; j++)
		free_irq(adapter->tx_scrq[j]->irq, adapter->tx_scrq[j]);
	kfree(adapter->rx_scrq);
	adapter->rx_scrq = NULL;
rx_failed:
	kfree(adapter->tx_scrq);
	adapter->tx_scrq = NULL;
tx_failed:
	for (i = 0; i < registered_queues; i++)
		release_sub_crq_queue(adapter, allqueues[i]);
	kfree(allqueues);
allqueues_failed:
	ibmvnic_remove(adapter->vdev);
}

static int pending_scrq(struct ibmvnic_adapter *adapter,
			struct ibmvnic_sub_crq_queue *scrq)
{
	union sub_crq *entry = &scrq->msgs[scrq->cur];

	if (entry->generic.first & IBMVNIC_CRQ_CMD_RSP || adapter->closing)
		return 1;
	else
		return 0;
}

static union sub_crq *ibmvnic_next_scrq(struct ibmvnic_adapter *adapter,
					struct ibmvnic_sub_crq_queue *scrq)
{
	union sub_crq *entry;
	unsigned long flags;

	spin_lock_irqsave(&scrq->lock, flags);
	entry = &scrq->msgs[scrq->cur];
	if (entry->generic.first & IBMVNIC_CRQ_CMD_RSP) {
		if (++scrq->cur == scrq->size)
			scrq->cur = 0;
	} else {
		entry = NULL;
	}
	spin_unlock_irqrestore(&scrq->lock, flags);

	return entry;
}

static union ibmvnic_crq *ibmvnic_next_crq(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_crq_queue *queue = &adapter->crq;
	union ibmvnic_crq *crq;

	crq = &queue->msgs[queue->cur];
	if (crq->generic.first & IBMVNIC_CRQ_CMD_RSP) {
		if (++queue->cur == queue->size)
			queue->cur = 0;
	} else {
		crq = NULL;
	}

	return crq;
}

static int send_subcrq(struct ibmvnic_adapter *adapter, u64 remote_handle,
		       union sub_crq *sub_crq)
{
	unsigned int ua = adapter->vdev->unit_address;
	struct device *dev = &adapter->vdev->dev;
	u64 *u64_crq = (u64 *)sub_crq;
	int rc;

	netdev_dbg(adapter->netdev,
		   "Sending sCRQ %016lx: %016lx %016lx %016lx %016lx\n",
		   (unsigned long int)cpu_to_be64(remote_handle),
		   (unsigned long int)cpu_to_be64(u64_crq[0]),
		   (unsigned long int)cpu_to_be64(u64_crq[1]),
		   (unsigned long int)cpu_to_be64(u64_crq[2]),
		   (unsigned long int)cpu_to_be64(u64_crq[3]));

	/* Make sure the hypervisor sees the complete request */
	mb();

	rc = plpar_hcall_norets(H_SEND_SUB_CRQ, ua,
				cpu_to_be64(remote_handle),
				cpu_to_be64(u64_crq[0]),
				cpu_to_be64(u64_crq[1]),
				cpu_to_be64(u64_crq[2]),
				cpu_to_be64(u64_crq[3]));

	if (rc) {
		if (rc == H_CLOSED)
			dev_warn(dev, "CRQ Queue closed\n");
		dev_err(dev, "Send error (rc=%d)\n", rc);
	}

	return rc;
}

static int send_subcrq_indirect(struct ibmvnic_adapter *adapter,
				u64 remote_handle, u64 ioba, u64 num_entries)
{
	unsigned int ua = adapter->vdev->unit_address;
	struct device *dev = &adapter->vdev->dev;
	int rc;

	/* Make sure the hypervisor sees the complete request */
	mb();
	rc = plpar_hcall_norets(H_SEND_SUB_CRQ_INDIRECT, ua,
				cpu_to_be64(remote_handle),
				ioba, num_entries);

	if (rc) {
		if (rc == H_CLOSED)
			dev_warn(dev, "CRQ Queue closed\n");
		dev_err(dev, "Send (indirect) error (rc=%d)\n", rc);
	}

	return rc;
}

static int ibmvnic_send_crq(struct ibmvnic_adapter *adapter,
			    union ibmvnic_crq *crq)
{
	unsigned int ua = adapter->vdev->unit_address;
	struct device *dev = &adapter->vdev->dev;
	u64 *u64_crq = (u64 *)crq;
	int rc;

	netdev_dbg(adapter->netdev, "Sending CRQ: %016lx %016lx\n",
		   (unsigned long int)cpu_to_be64(u64_crq[0]),
		   (unsigned long int)cpu_to_be64(u64_crq[1]));

	/* Make sure the hypervisor sees the complete request */
	mb();

	rc = plpar_hcall_norets(H_SEND_CRQ, ua,
				cpu_to_be64(u64_crq[0]),
				cpu_to_be64(u64_crq[1]));

	if (rc) {
		if (rc == H_CLOSED)
			dev_warn(dev, "CRQ Queue closed\n");
		dev_warn(dev, "Send error (rc=%d)\n", rc);
	}

	return rc;
}

static int ibmvnic_send_crq_init(struct ibmvnic_adapter *adapter)
{
	union ibmvnic_crq crq;

	memset(&crq, 0, sizeof(crq));
	crq.generic.first = IBMVNIC_CRQ_INIT_CMD;
	crq.generic.cmd = IBMVNIC_CRQ_INIT;
	netdev_dbg(adapter->netdev, "Sending CRQ init\n");

	return ibmvnic_send_crq(adapter, &crq);
}

static int ibmvnic_send_crq_init_complete(struct ibmvnic_adapter *adapter)
{
	union ibmvnic_crq crq;

	memset(&crq, 0, sizeof(crq));
	crq.generic.first = IBMVNIC_CRQ_INIT_CMD;
	crq.generic.cmd = IBMVNIC_CRQ_INIT_COMPLETE;
	netdev_dbg(adapter->netdev, "Sending CRQ init complete\n");

	return ibmvnic_send_crq(adapter, &crq);
}

static int send_version_xchg(struct ibmvnic_adapter *adapter)
{
	union ibmvnic_crq crq;

	memset(&crq, 0, sizeof(crq));
	crq.version_exchange.first = IBMVNIC_CRQ_CMD;
	crq.version_exchange.cmd = VERSION_EXCHANGE;
	crq.version_exchange.version = cpu_to_be16(ibmvnic_version);

	return ibmvnic_send_crq(adapter, &crq);
}

static void send_login(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_login_rsp_buffer *login_rsp_buffer;
	struct ibmvnic_login_buffer *login_buffer;
	struct ibmvnic_inflight_cmd *inflight_cmd;
	struct device *dev = &adapter->vdev->dev;
	dma_addr_t rsp_buffer_token;
	dma_addr_t buffer_token;
	size_t rsp_buffer_size;
	union ibmvnic_crq crq;
	unsigned long flags;
	size_t buffer_size;
	__be64 *tx_list_p;
	__be64 *rx_list_p;
	int i;

	buffer_size =
	    sizeof(struct ibmvnic_login_buffer) +
	    sizeof(u64) * (adapter->req_tx_queues + adapter->req_rx_queues);

	login_buffer = kmalloc(buffer_size, GFP_ATOMIC);
	if (!login_buffer)
		goto buf_alloc_failed;

	buffer_token = dma_map_single(dev, login_buffer, buffer_size,
				      DMA_TO_DEVICE);
	if (dma_mapping_error(dev, buffer_token)) {
		dev_err(dev, "Couldn't map login buffer\n");
		goto buf_map_failed;
	}

	rsp_buffer_size =
	    sizeof(struct ibmvnic_login_rsp_buffer) +
	    sizeof(u64) * (adapter->req_tx_queues +
			   adapter->req_rx_queues *
			   adapter->req_rx_add_queues +
			   adapter->req_rx_add_queues) +
	    sizeof(u8) * (IBMVNIC_TX_DESC_VERSIONS);

	login_rsp_buffer = kmalloc(rsp_buffer_size, GFP_ATOMIC);
	if (!login_rsp_buffer)
		goto buf_rsp_alloc_failed;

	rsp_buffer_token = dma_map_single(dev, login_rsp_buffer,
					  rsp_buffer_size, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, rsp_buffer_token)) {
		dev_err(dev, "Couldn't map login rsp buffer\n");
		goto buf_rsp_map_failed;
	}
	inflight_cmd = kmalloc(sizeof(*inflight_cmd), GFP_ATOMIC);
	if (!inflight_cmd) {
		dev_err(dev, "Couldn't allocate inflight_cmd\n");
		goto inflight_alloc_failed;
	}
	adapter->login_buf = login_buffer;
	adapter->login_buf_token = buffer_token;
	adapter->login_buf_sz = buffer_size;
	adapter->login_rsp_buf = login_rsp_buffer;
	adapter->login_rsp_buf_token = rsp_buffer_token;
	adapter->login_rsp_buf_sz = rsp_buffer_size;

	login_buffer->len = cpu_to_be32(buffer_size);
	login_buffer->version = cpu_to_be32(INITIAL_VERSION_LB);
	login_buffer->num_txcomp_subcrqs = cpu_to_be32(adapter->req_tx_queues);
	login_buffer->off_txcomp_subcrqs =
	    cpu_to_be32(sizeof(struct ibmvnic_login_buffer));
	login_buffer->num_rxcomp_subcrqs = cpu_to_be32(adapter->req_rx_queues);
	login_buffer->off_rxcomp_subcrqs =
	    cpu_to_be32(sizeof(struct ibmvnic_login_buffer) +
			sizeof(u64) * adapter->req_tx_queues);
	login_buffer->login_rsp_ioba = cpu_to_be32(rsp_buffer_token);
	login_buffer->login_rsp_len = cpu_to_be32(rsp_buffer_size);

	tx_list_p = (__be64 *)((char *)login_buffer +
			       sizeof(struct ibmvnic_login_buffer));
	rx_list_p = (__be64 *)((char *)login_buffer +
			       sizeof(struct ibmvnic_login_buffer) +
			       sizeof(u64) * adapter->req_tx_queues);

	for (i = 0; i < adapter->req_tx_queues; i++) {
		if (adapter->tx_scrq[i]) {
			tx_list_p[i] = cpu_to_be64(adapter->tx_scrq[i]->
						   crq_num);
		}
	}

	for (i = 0; i < adapter->req_rx_queues; i++) {
		if (adapter->rx_scrq[i]) {
			rx_list_p[i] = cpu_to_be64(adapter->rx_scrq[i]->
						   crq_num);
		}
	}

	netdev_dbg(adapter->netdev, "Login Buffer:\n");
	for (i = 0; i < (adapter->login_buf_sz - 1) / 8 + 1; i++) {
		netdev_dbg(adapter->netdev, "%016lx\n",
			   ((unsigned long int *)(adapter->login_buf))[i]);
	}

	memset(&crq, 0, sizeof(crq));
	crq.login.first = IBMVNIC_CRQ_CMD;
	crq.login.cmd = LOGIN;
	crq.login.ioba = cpu_to_be32(buffer_token);
	crq.login.len = cpu_to_be32(buffer_size);

	memcpy(&inflight_cmd->crq, &crq, sizeof(crq));

	spin_lock_irqsave(&adapter->inflight_lock, flags);
	list_add_tail(&inflight_cmd->list, &adapter->inflight);
	spin_unlock_irqrestore(&adapter->inflight_lock, flags);

	ibmvnic_send_crq(adapter, &crq);

	return;

inflight_alloc_failed:
	dma_unmap_single(dev, rsp_buffer_token, rsp_buffer_size,
			 DMA_FROM_DEVICE);
buf_rsp_map_failed:
	kfree(login_rsp_buffer);
buf_rsp_alloc_failed:
	dma_unmap_single(dev, buffer_token, buffer_size, DMA_TO_DEVICE);
buf_map_failed:
	kfree(login_buffer);
buf_alloc_failed:
	return;
}

static void send_request_map(struct ibmvnic_adapter *adapter, dma_addr_t addr,
			     u32 len, u8 map_id)
{
	union ibmvnic_crq crq;

	memset(&crq, 0, sizeof(crq));
	crq.request_map.first = IBMVNIC_CRQ_CMD;
	crq.request_map.cmd = REQUEST_MAP;
	crq.request_map.map_id = map_id;
	crq.request_map.ioba = cpu_to_be32(addr);
	crq.request_map.len = cpu_to_be32(len);
	ibmvnic_send_crq(adapter, &crq);
}

static void send_request_unmap(struct ibmvnic_adapter *adapter, u8 map_id)
{
	union ibmvnic_crq crq;

	memset(&crq, 0, sizeof(crq));
	crq.request_unmap.first = IBMVNIC_CRQ_CMD;
	crq.request_unmap.cmd = REQUEST_UNMAP;
	crq.request_unmap.map_id = map_id;
	ibmvnic_send_crq(adapter, &crq);
}

static void send_map_query(struct ibmvnic_adapter *adapter)
{
	union ibmvnic_crq crq;

	memset(&crq, 0, sizeof(crq));
	crq.query_map.first = IBMVNIC_CRQ_CMD;
	crq.query_map.cmd = QUERY_MAP;
	ibmvnic_send_crq(adapter, &crq);
}

1911 /* Send a series of CRQs requesting various capabilities of the VNIC server */
1912 static void send_cap_queries(struct ibmvnic_adapter *adapter)
1914 union ibmvnic_crq crq;
1916 atomic_set(&adapter->running_cap_queries, 0);
1917 memset(&crq, 0, sizeof(crq));
1918 crq.query_capability.first = IBMVNIC_CRQ_CMD;
1919 crq.query_capability.cmd = QUERY_CAPABILITY;
1921 crq.query_capability.capability = cpu_to_be16(MIN_TX_QUEUES);
	atomic_inc(&adapter->running_cap_queries);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(MIN_RX_QUEUES);
	atomic_inc(&adapter->running_cap_queries);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(MIN_RX_ADD_QUEUES);
	atomic_inc(&adapter->running_cap_queries);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(MAX_TX_QUEUES);
	atomic_inc(&adapter->running_cap_queries);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(MAX_RX_QUEUES);
	atomic_inc(&adapter->running_cap_queries);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(MAX_RX_ADD_QUEUES);
	atomic_inc(&adapter->running_cap_queries);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability =
	    cpu_to_be16(MIN_TX_ENTRIES_PER_SUBCRQ);
	atomic_inc(&adapter->running_cap_queries);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability =
	    cpu_to_be16(MIN_RX_ADD_ENTRIES_PER_SUBCRQ);
	atomic_inc(&adapter->running_cap_queries);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability =
	    cpu_to_be16(MAX_TX_ENTRIES_PER_SUBCRQ);
	atomic_inc(&adapter->running_cap_queries);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability =
	    cpu_to_be16(MAX_RX_ADD_ENTRIES_PER_SUBCRQ);
	atomic_inc(&adapter->running_cap_queries);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(TCP_IP_OFFLOAD);
	atomic_inc(&adapter->running_cap_queries);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(PROMISC_SUPPORTED);
	atomic_inc(&adapter->running_cap_queries);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(MIN_MTU);
	atomic_inc(&adapter->running_cap_queries);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(MAX_MTU);
	atomic_inc(&adapter->running_cap_queries);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(MAX_MULTICAST_FILTERS);
	atomic_inc(&adapter->running_cap_queries);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(VLAN_HEADER_INSERTION);
	atomic_inc(&adapter->running_cap_queries);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(MAX_TX_SG_ENTRIES);
	atomic_inc(&adapter->running_cap_queries);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(RX_SG_SUPPORTED);
	atomic_inc(&adapter->running_cap_queries);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(OPT_TX_COMP_SUB_QUEUES);
	atomic_inc(&adapter->running_cap_queries);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(OPT_RX_COMP_QUEUES);
	atomic_inc(&adapter->running_cap_queries);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability =
	    cpu_to_be16(OPT_RX_BUFADD_Q_PER_RX_COMP_Q);
	atomic_inc(&adapter->running_cap_queries);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability =
	    cpu_to_be16(OPT_TX_ENTRIES_PER_SUBCRQ);
	atomic_inc(&adapter->running_cap_queries);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability =
	    cpu_to_be16(OPT_RXBA_ENTRIES_PER_SUBCRQ);
	atomic_inc(&adapter->running_cap_queries);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(TX_RX_DESC_REQ);
	atomic_inc(&adapter->running_cap_queries);
	ibmvnic_send_crq(adapter, &crq);
}
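
/* Handle the response to a QUERY_IP_OFFLOAD command. The server has filled
 * adapter->ip_offload_buf with its checksum and segmentation capabilities;
 * unmap it, log the contents, translate the capabilities into netdev
 * feature flags, and send back a CONTROL_IP_OFFLOAD command enabling the
 * subset of features we intend to use.
 */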
static void handle_query_ip_offload_rsp(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_query_ip_offload_buffer *buf = &adapter->ip_offload_buf;
	union ibmvnic_crq crq;
	int i;

	dma_unmap_single(dev, adapter->ip_offload_tok,
			 sizeof(adapter->ip_offload_buf), DMA_FROM_DEVICE);

	netdev_dbg(adapter->netdev, "Query IP Offload Buffer:\n");
	for (i = 0; i < (sizeof(adapter->ip_offload_buf) - 1) / 8 + 1; i++)
		netdev_dbg(adapter->netdev, "%016lx\n",
			   ((unsigned long int *)(buf))[i]);

	netdev_dbg(adapter->netdev, "ipv4_chksum = %d\n", buf->ipv4_chksum);
	netdev_dbg(adapter->netdev, "ipv6_chksum = %d\n", buf->ipv6_chksum);
	netdev_dbg(adapter->netdev, "tcp_ipv4_chksum = %d\n",
		   buf->tcp_ipv4_chksum);
	netdev_dbg(adapter->netdev, "tcp_ipv6_chksum = %d\n",
		   buf->tcp_ipv6_chksum);
	netdev_dbg(adapter->netdev, "udp_ipv4_chksum = %d\n",
		   buf->udp_ipv4_chksum);
	netdev_dbg(adapter->netdev, "udp_ipv6_chksum = %d\n",
		   buf->udp_ipv6_chksum);
	netdev_dbg(adapter->netdev, "large_tx_ipv4 = %d\n",
		   buf->large_tx_ipv4);
	netdev_dbg(adapter->netdev, "large_tx_ipv6 = %d\n",
		   buf->large_tx_ipv6);
	netdev_dbg(adapter->netdev, "large_rx_ipv4 = %d\n",
		   buf->large_rx_ipv4);
	netdev_dbg(adapter->netdev, "large_rx_ipv6 = %d\n",
		   buf->large_rx_ipv6);
	netdev_dbg(adapter->netdev, "max_ipv4_hdr_sz = %d\n",
		   buf->max_ipv4_header_size);
	netdev_dbg(adapter->netdev, "max_ipv6_hdr_sz = %d\n",
		   buf->max_ipv6_header_size);
	netdev_dbg(adapter->netdev, "max_tcp_hdr_size = %d\n",
		   buf->max_tcp_header_size);
	netdev_dbg(adapter->netdev, "max_udp_hdr_size = %d\n",
		   buf->max_udp_header_size);
	netdev_dbg(adapter->netdev, "max_large_tx_size = %d\n",
		   buf->max_large_tx_size);
	netdev_dbg(adapter->netdev, "max_large_rx_size = %d\n",
		   buf->max_large_rx_size);
	netdev_dbg(adapter->netdev, "ipv6_ext_hdr = %d\n",
		   buf->ipv6_extension_header);
	netdev_dbg(adapter->netdev, "tcp_pseudosum_req = %d\n",
		   buf->tcp_pseudosum_req);
	netdev_dbg(adapter->netdev, "num_ipv6_ext_hd = %d\n",
		   buf->num_ipv6_ext_headers);
	netdev_dbg(adapter->netdev, "off_ipv6_ext_hd = %d\n",
		   buf->off_ipv6_ext_headers);

	adapter->ip_offload_ctrl_tok =
	    dma_map_single(dev, &adapter->ip_offload_ctrl,
			   sizeof(adapter->ip_offload_ctrl), DMA_TO_DEVICE);

	if (dma_mapping_error(dev, adapter->ip_offload_ctrl_tok)) {
		dev_err(dev, "Couldn't map ip offload control buffer\n");
		return;
	}

	adapter->ip_offload_ctrl.version = cpu_to_be32(INITIAL_VERSION_IOB);
	adapter->ip_offload_ctrl.tcp_ipv4_chksum = buf->tcp_ipv4_chksum;
	adapter->ip_offload_ctrl.udp_ipv4_chksum = buf->udp_ipv4_chksum;
	adapter->ip_offload_ctrl.tcp_ipv6_chksum = buf->tcp_ipv6_chksum;
	adapter->ip_offload_ctrl.udp_ipv6_chksum = buf->udp_ipv6_chksum;

	/* large_tx/rx disabled for now, additional features needed */
	adapter->ip_offload_ctrl.large_tx_ipv4 = 0;
	adapter->ip_offload_ctrl.large_tx_ipv6 = 0;
	adapter->ip_offload_ctrl.large_rx_ipv4 = 0;
	adapter->ip_offload_ctrl.large_rx_ipv6 = 0;

	adapter->netdev->features = NETIF_F_GSO;

	if (buf->tcp_ipv4_chksum || buf->udp_ipv4_chksum)
		adapter->netdev->features |= NETIF_F_IP_CSUM;

	if (buf->tcp_ipv6_chksum || buf->udp_ipv6_chksum)
		adapter->netdev->features |= NETIF_F_IPV6_CSUM;

	memset(&crq, 0, sizeof(crq));
	crq.control_ip_offload.first = IBMVNIC_CRQ_CMD;
	crq.control_ip_offload.cmd = CONTROL_IP_OFFLOAD;
	crq.control_ip_offload.len =
	    cpu_to_be32(sizeof(adapter->ip_offload_ctrl));
	crq.control_ip_offload.ioba = cpu_to_be32(adapter->ip_offload_ctrl_tok);
	ibmvnic_send_crq(adapter, &crq);
}
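
/* Error reporting is a two-step handshake: the server sends an
 * ERROR_INDICATION naming an error id, the driver maps a buffer and asks
 * for the details with REQUEST_ERROR_INFO, and this handler consumes the
 * response by dumping the detail bytes and freeing the buffer.
 */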
static void handle_error_info_rsp(union ibmvnic_crq *crq,
				  struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_error_buff *error_buff;
	unsigned long flags;
	bool found = false;
	int i;

	if (crq->request_error_rsp.rc.code) {
		dev_info(dev, "Request Error Rsp returned with rc=%x\n",
			 crq->request_error_rsp.rc.code);
		return;
	}

	spin_lock_irqsave(&adapter->error_list_lock, flags);
	list_for_each_entry(error_buff, &adapter->errors, list)
		if (error_buff->error_id == crq->request_error_rsp.error_id) {
			found = true;
			list_del(&error_buff->list);
			break;
		}
	spin_unlock_irqrestore(&adapter->error_list_lock, flags);

	if (!found) {
		dev_err(dev, "Couldn't find error id %x\n",
			crq->request_error_rsp.error_id);
		return;
	}

	dev_err(dev, "Detailed info for error id %x:",
		crq->request_error_rsp.error_id);

	for (i = 0; i < error_buff->len; i++) {
		pr_cont("%02x", (int)error_buff->buff[i]);
		if (i % 8 == 7)
			pr_cont(" ");
	}
	pr_cont("\n");

	dma_unmap_single(dev, error_buff->dma, error_buff->len,
			 DMA_FROM_DEVICE);
	kfree(error_buff->buff);
	kfree(error_buff);
}
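
/* Handle the response to a REQUEST_DUMP_SIZE command by allocating and
 * mapping a buffer of the size the server reported, then sending the
 * REQUEST_DUMP itself. The command is recorded on the inflight list so the
 * buffer can be reclaimed if the CRQ drops before the response arrives.
 */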
static void handle_dump_size_rsp(union ibmvnic_crq *crq,
				 struct ibmvnic_adapter *adapter)
{
	int len = be32_to_cpu(crq->request_dump_size_rsp.len);
	struct ibmvnic_inflight_cmd *inflight_cmd;
	struct device *dev = &adapter->vdev->dev;
	union ibmvnic_crq newcrq;
	unsigned long flags;

	/* allocate and map buffer */
	adapter->dump_data = kmalloc(len, GFP_KERNEL);
	if (!adapter->dump_data) {
		complete(&adapter->fw_done);
		return;
	}

	adapter->dump_data_token = dma_map_single(dev, adapter->dump_data, len,
						  DMA_BIDIRECTIONAL);

	if (dma_mapping_error(dev, adapter->dump_data_token)) {
		if (!firmware_has_feature(FW_FEATURE_CMO))
			dev_err(dev, "Couldn't map dump data\n");
		kfree(adapter->dump_data);
		complete(&adapter->fw_done);
		return;
	}

	inflight_cmd = kmalloc(sizeof(*inflight_cmd), GFP_ATOMIC);
	if (!inflight_cmd) {
		dma_unmap_single(dev, adapter->dump_data_token, len,
				 DMA_BIDIRECTIONAL);
		kfree(adapter->dump_data);
		complete(&adapter->fw_done);
		return;
	}

	/* remember the size for the REQUEST_DUMP command below and for the
	 * debugfs read that consumes the data
	 */
	adapter->dump_data_size = len;

	memset(&newcrq, 0, sizeof(newcrq));
	newcrq.request_dump.first = IBMVNIC_CRQ_CMD;
	newcrq.request_dump.cmd = REQUEST_DUMP;
	newcrq.request_dump.ioba = cpu_to_be32(adapter->dump_data_token);
	newcrq.request_dump.len = cpu_to_be32(adapter->dump_data_size);

	memcpy(&inflight_cmd->crq, &newcrq, sizeof(newcrq));

	spin_lock_irqsave(&adapter->inflight_lock, flags);
	list_add_tail(&inflight_cmd->list, &adapter->inflight);
	spin_unlock_irqrestore(&adapter->inflight_lock, flags);

	ibmvnic_send_crq(adapter, &newcrq);
}

static void handle_error_indication(union ibmvnic_crq *crq,
				    struct ibmvnic_adapter *adapter)
{
	int detail_len = be32_to_cpu(crq->error_indication.detail_error_sz);
	struct ibmvnic_inflight_cmd *inflight_cmd;
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_error_buff *error_buff;
	union ibmvnic_crq new_crq;
	unsigned long flags;

	dev_err(dev, "Firmware reports %serror id %x, cause %d\n",
		crq->error_indication.flags & IBMVNIC_FATAL_ERROR ?
		    "FATAL " : "",
		crq->error_indication.error_id,
		crq->error_indication.error_cause);

	error_buff = kmalloc(sizeof(*error_buff), GFP_ATOMIC);
	if (!error_buff)
		return;

	error_buff->buff = kmalloc(detail_len, GFP_ATOMIC);
	if (!error_buff->buff) {
		kfree(error_buff);
		return;
	}

	error_buff->dma = dma_map_single(dev, error_buff->buff, detail_len,
					 DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, error_buff->dma)) {
		if (!firmware_has_feature(FW_FEATURE_CMO))
			dev_err(dev, "Couldn't map error buffer\n");
		kfree(error_buff->buff);
		kfree(error_buff);
		return;
	}

	inflight_cmd = kmalloc(sizeof(*inflight_cmd), GFP_ATOMIC);
	if (!inflight_cmd) {
		dma_unmap_single(dev, error_buff->dma, detail_len,
				 DMA_FROM_DEVICE);
		kfree(error_buff->buff);
		kfree(error_buff);
		return;
	}

	error_buff->len = detail_len;
	error_buff->error_id = crq->error_indication.error_id;

	spin_lock_irqsave(&adapter->error_list_lock, flags);
	list_add_tail(&error_buff->list, &adapter->errors);
	spin_unlock_irqrestore(&adapter->error_list_lock, flags);

	memset(&new_crq, 0, sizeof(new_crq));
	new_crq.request_error_info.first = IBMVNIC_CRQ_CMD;
	new_crq.request_error_info.cmd = REQUEST_ERROR_INFO;
	new_crq.request_error_info.ioba = cpu_to_be32(error_buff->dma);
	new_crq.request_error_info.len = cpu_to_be32(detail_len);
	new_crq.request_error_info.error_id = crq->error_indication.error_id;

	/* record the outgoing command itself, not a pointer to it, so the
	 * inflight cleanup path can identify what was pending
	 */
	memcpy(&inflight_cmd->crq, &new_crq, sizeof(new_crq));

	spin_lock_irqsave(&adapter->inflight_lock, flags);
	list_add_tail(&inflight_cmd->list, &adapter->inflight);
	spin_unlock_irqrestore(&adapter->inflight_lock, flags);

	ibmvnic_send_crq(adapter, &new_crq);
}

static void handle_change_mac_rsp(union ibmvnic_crq *crq,
				  struct ibmvnic_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct device *dev = &adapter->vdev->dev;
	long rc;

	rc = crq->change_mac_addr_rsp.rc.code;
	if (rc) {
		dev_err(dev, "Error %ld in CHANGE_MAC_ADDR_RSP\n", rc);
		return;
	}
	memcpy(netdev->dev_addr, &crq->change_mac_addr_rsp.mac_addr[0],
	       ETH_ALEN);
}
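
/* Handle the response to a REQUEST_CAPABILITY command. On PARTIALSUCCESS
 * the server proposes a lower value; we adopt it, release the sub-CRQs,
 * and complete init_done so initialization can retry the negotiation.
 * Once all seven requested capabilities have been acknowledged, kick off
 * the IP offload query.
 */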
static void handle_request_cap_rsp(union ibmvnic_crq *crq,
				   struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	u64 *req_value;
	char *name;

	switch (be16_to_cpu(crq->request_capability_rsp.capability)) {
	case REQ_TX_QUEUES:
		req_value = &adapter->req_tx_queues;
		name = "tx";
		break;
	case REQ_RX_QUEUES:
		req_value = &adapter->req_rx_queues;
		name = "rx";
		break;
	case REQ_RX_ADD_QUEUES:
		req_value = &adapter->req_rx_add_queues;
		name = "rx_add";
		break;
	case REQ_TX_ENTRIES_PER_SUBCRQ:
		req_value = &adapter->req_tx_entries_per_subcrq;
		name = "tx_entries_per_subcrq";
		break;
	case REQ_RX_ADD_ENTRIES_PER_SUBCRQ:
		req_value = &adapter->req_rx_add_entries_per_subcrq;
		name = "rx_add_entries_per_subcrq";
		break;
	case REQ_MTU:
		req_value = &adapter->req_mtu;
		name = "mtu";
		break;
	case PROMISC_REQUESTED:
		req_value = &adapter->promisc;
		name = "promisc";
		break;
	default:
		dev_err(dev, "Got invalid cap request rsp %d\n",
			be16_to_cpu(crq->request_capability.capability));
		return;
	}

	switch (crq->request_capability_rsp.rc.code) {
	case SUCCESS:
		break;
	case PARTIALSUCCESS:
		dev_info(dev, "req=%lld, rsp=%ld in %s queue, retrying.\n",
			 *req_value,
			 (long int)be32_to_cpu(crq->request_capability_rsp.
					       number), name);
		release_sub_crqs(adapter);
		*req_value = be32_to_cpu(crq->request_capability_rsp.number);
		complete(&adapter->init_done);
		return;
	default:
		dev_err(dev, "Error %d in request cap rsp\n",
			crq->request_capability_rsp.rc.code);
		return;
	}

	/* Done receiving requested capabilities, query IP offload support */
	if (++adapter->requested_caps == 7) {
		union ibmvnic_crq newcrq;
		int buf_sz = sizeof(struct ibmvnic_query_ip_offload_buffer);
		struct ibmvnic_query_ip_offload_buffer *ip_offload_buf =
		    &adapter->ip_offload_buf;

		adapter->ip_offload_tok = dma_map_single(dev, ip_offload_buf,
							 buf_sz,
							 DMA_FROM_DEVICE);

		if (dma_mapping_error(dev, adapter->ip_offload_tok)) {
			if (!firmware_has_feature(FW_FEATURE_CMO))
				dev_err(dev, "Couldn't map offload buffer\n");
			return;
		}

		memset(&newcrq, 0, sizeof(newcrq));
		newcrq.query_ip_offload.first = IBMVNIC_CRQ_CMD;
		newcrq.query_ip_offload.cmd = QUERY_IP_OFFLOAD;
		newcrq.query_ip_offload.len = cpu_to_be32(buf_sz);
		newcrq.query_ip_offload.ioba =
		    cpu_to_be32(adapter->ip_offload_tok);

		ibmvnic_send_crq(adapter, &newcrq);
	}
}
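
/* Handle the response to the LOGIN command: unmap both login buffers,
 * sanity-check that the queue counts the server granted are consistent
 * with what was requested, and then ask for the number of RAS components
 * so the debugfs tree can be built.
 */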
static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq,
			    struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_login_rsp_buffer *login_rsp = adapter->login_rsp_buf;
	struct ibmvnic_login_buffer *login = adapter->login_buf;
	union ibmvnic_crq crq;
	int i;

	dma_unmap_single(dev, adapter->login_buf_token, adapter->login_buf_sz,
			 DMA_BIDIRECTIONAL);
	dma_unmap_single(dev, adapter->login_rsp_buf_token,
			 adapter->login_rsp_buf_sz, DMA_BIDIRECTIONAL);

	netdev_dbg(adapter->netdev, "Login Response Buffer:\n");
	for (i = 0; i < (adapter->login_rsp_buf_sz - 1) / 8 + 1; i++) {
		netdev_dbg(adapter->netdev, "%016lx\n",
			   ((unsigned long int *)(adapter->login_rsp_buf))[i]);
	}

	/* Sanity checks */
	if (login->num_txcomp_subcrqs != login_rsp->num_txsubm_subcrqs ||
	    (be32_to_cpu(login->num_rxcomp_subcrqs) *
	     adapter->req_rx_add_queues !=
	     be32_to_cpu(login_rsp->num_rxadd_subcrqs))) {
		dev_err(dev, "FATAL: Inconsistent login and login rsp\n");
		ibmvnic_remove(adapter->vdev);
		return -EIO;
	}
	complete(&adapter->init_done);

	memset(&crq, 0, sizeof(crq));
	crq.request_ras_comp_num.first = IBMVNIC_CRQ_CMD;
	crq.request_ras_comp_num.cmd = REQUEST_RAS_COMP_NUM;
	ibmvnic_send_crq(adapter, &crq);

	return 0;
}
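
/* Handle the response to a REQUEST_MAP command. On failure, the map id
 * that was assigned to the long term buffer has to be invalidated again in
 * whichever tx/rx pool owns it.
 */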
static void handle_request_map_rsp(union ibmvnic_crq *crq,
				   struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	u8 map_id = crq->request_map_rsp.map_id;
	int tx_subcrqs;
	int rx_subcrqs;
	long rc;
	int i;

	tx_subcrqs = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
	rx_subcrqs = be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);

	rc = crq->request_map_rsp.rc.code;
	if (rc) {
		dev_err(dev, "Error %ld in REQUEST_MAP_RSP\n", rc);
		adapter->map_id--;
		/* need to find and zero tx/rx_pool map_id */
		for (i = 0; i < tx_subcrqs; i++) {
			if (adapter->tx_pool[i].long_term_buff.map_id == map_id)
				adapter->tx_pool[i].long_term_buff.map_id = 0;
		}
		for (i = 0; i < rx_subcrqs; i++) {
			if (adapter->rx_pool[i].long_term_buff.map_id == map_id)
				adapter->rx_pool[i].long_term_buff.map_id = 0;
		}
	}
	complete(&adapter->fw_done);
}

static void handle_request_unmap_rsp(union ibmvnic_crq *crq,
				     struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	long rc;

	rc = crq->request_unmap_rsp.rc.code;
	if (rc)
		dev_err(dev, "Error %ld in REQUEST_UNMAP_RSP\n", rc);
}

static void handle_query_map_rsp(union ibmvnic_crq *crq,
				 struct ibmvnic_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct device *dev = &adapter->vdev->dev;
	long rc;

	rc = crq->query_map_rsp.rc.code;
	if (rc) {
		dev_err(dev, "Error %ld in QUERY_MAP_RSP\n", rc);
		return;
	}
	netdev_dbg(netdev, "page_size = %d\ntot_pages = %d\nfree_pages = %d\n",
		   crq->query_map_rsp.page_size, crq->query_map_rsp.tot_pages,
		   crq->query_map_rsp.free_pages);
}

static void handle_query_cap_rsp(union ibmvnic_crq *crq,
				 struct ibmvnic_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct device *dev = &adapter->vdev->dev;
	long rc;

	atomic_dec(&adapter->running_cap_queries);
	netdev_dbg(netdev, "Outstanding queries: %d\n",
		   atomic_read(&adapter->running_cap_queries));
	rc = crq->query_capability.rc.code;
	if (rc) {
		dev_err(dev, "Error %ld in QUERY_CAP_RSP\n", rc);
		goto out;
	}

	switch (be16_to_cpu(crq->query_capability.capability)) {
	case MIN_TX_QUEUES:
		adapter->min_tx_queues =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "min_tx_queues = %lld\n",
			   adapter->min_tx_queues);
		break;
	case MIN_RX_QUEUES:
		adapter->min_rx_queues =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "min_rx_queues = %lld\n",
			   adapter->min_rx_queues);
		break;
	case MIN_RX_ADD_QUEUES:
		adapter->min_rx_add_queues =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "min_rx_add_queues = %lld\n",
			   adapter->min_rx_add_queues);
		break;
	case MAX_TX_QUEUES:
		adapter->max_tx_queues =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_tx_queues = %lld\n",
			   adapter->max_tx_queues);
		break;
	case MAX_RX_QUEUES:
		adapter->max_rx_queues =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_rx_queues = %lld\n",
			   adapter->max_rx_queues);
		break;
	case MAX_RX_ADD_QUEUES:
		adapter->max_rx_add_queues =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_rx_add_queues = %lld\n",
			   adapter->max_rx_add_queues);
		break;
	case MIN_TX_ENTRIES_PER_SUBCRQ:
		adapter->min_tx_entries_per_subcrq =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "min_tx_entries_per_subcrq = %lld\n",
			   adapter->min_tx_entries_per_subcrq);
		break;
	case MIN_RX_ADD_ENTRIES_PER_SUBCRQ:
		adapter->min_rx_add_entries_per_subcrq =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "min_rx_add_entrs_per_subcrq = %lld\n",
			   adapter->min_rx_add_entries_per_subcrq);
		break;
	case MAX_TX_ENTRIES_PER_SUBCRQ:
		adapter->max_tx_entries_per_subcrq =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_tx_entries_per_subcrq = %lld\n",
			   adapter->max_tx_entries_per_subcrq);
		break;
	case MAX_RX_ADD_ENTRIES_PER_SUBCRQ:
		adapter->max_rx_add_entries_per_subcrq =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_rx_add_entrs_per_subcrq = %lld\n",
			   adapter->max_rx_add_entries_per_subcrq);
		break;
	case TCP_IP_OFFLOAD:
		adapter->tcp_ip_offload =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "tcp_ip_offload = %lld\n",
			   adapter->tcp_ip_offload);
		break;
	case PROMISC_SUPPORTED:
		adapter->promisc_supported =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "promisc_supported = %lld\n",
			   adapter->promisc_supported);
		break;
	case MIN_MTU:
		adapter->min_mtu = be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "min_mtu = %lld\n", adapter->min_mtu);
		break;
	case MAX_MTU:
		adapter->max_mtu = be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_mtu = %lld\n", adapter->max_mtu);
		break;
	case MAX_MULTICAST_FILTERS:
		adapter->max_multicast_filters =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_multicast_filters = %lld\n",
			   adapter->max_multicast_filters);
		break;
	case VLAN_HEADER_INSERTION:
		adapter->vlan_header_insertion =
		    be64_to_cpu(crq->query_capability.number);
		if (adapter->vlan_header_insertion)
			netdev->features |= NETIF_F_HW_VLAN_STAG_TX;
		netdev_dbg(netdev, "vlan_header_insertion = %lld\n",
			   adapter->vlan_header_insertion);
		break;
	case MAX_TX_SG_ENTRIES:
		adapter->max_tx_sg_entries =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_tx_sg_entries = %lld\n",
			   adapter->max_tx_sg_entries);
		break;
	case RX_SG_SUPPORTED:
		adapter->rx_sg_supported =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "rx_sg_supported = %lld\n",
			   adapter->rx_sg_supported);
		break;
	case OPT_TX_COMP_SUB_QUEUES:
		adapter->opt_tx_comp_sub_queues =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "opt_tx_comp_sub_queues = %lld\n",
			   adapter->opt_tx_comp_sub_queues);
		break;
	case OPT_RX_COMP_QUEUES:
		adapter->opt_rx_comp_queues =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "opt_rx_comp_queues = %lld\n",
			   adapter->opt_rx_comp_queues);
		break;
	case OPT_RX_BUFADD_Q_PER_RX_COMP_Q:
		adapter->opt_rx_bufadd_q_per_rx_comp_q =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "opt_rx_bufadd_q_per_rx_comp_q = %lld\n",
			   adapter->opt_rx_bufadd_q_per_rx_comp_q);
		break;
	case OPT_TX_ENTRIES_PER_SUBCRQ:
		adapter->opt_tx_entries_per_subcrq =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "opt_tx_entries_per_subcrq = %lld\n",
			   adapter->opt_tx_entries_per_subcrq);
		break;
	case OPT_RXBA_ENTRIES_PER_SUBCRQ:
		adapter->opt_rxba_entries_per_subcrq =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "opt_rxba_entries_per_subcrq = %lld\n",
			   adapter->opt_rxba_entries_per_subcrq);
		break;
	case TX_RX_DESC_REQ:
		adapter->tx_rx_desc_req = crq->query_capability.number;
		netdev_dbg(netdev, "tx_rx_desc_req = %llx\n",
			   adapter->tx_rx_desc_req);
		break;
	default:
		netdev_err(netdev, "Got invalid cap rsp %d\n",
			   be16_to_cpu(crq->query_capability.capability));
	}

out:
	/* We're done querying the capabilities, initialize sub-crqs */
	if (atomic_read(&adapter->running_cap_queries) == 0)
		complete(&adapter->init_done);
}
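
/* Handle the response to a CONTROL_RAS command by mirroring the
 * acknowledged operation (trace level, pause state, buffer size, ...) into
 * the driver's cached copy of the RAS component table.
 */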
static void handle_control_ras_rsp(union ibmvnic_crq *crq,
				   struct ibmvnic_adapter *adapter)
{
	u8 correlator = crq->control_ras_rsp.correlator;
	struct device *dev = &adapter->vdev->dev;
	bool found = false;
	int i;

	if (crq->control_ras_rsp.rc.code) {
		dev_warn(dev, "Control ras failed rc=%d\n",
			 crq->control_ras_rsp.rc.code);
		return;
	}

	for (i = 0; i < adapter->ras_comp_num; i++) {
		if (adapter->ras_comps[i].correlator == correlator) {
			found = true;
			break;
		}
	}

	if (!found) {
		dev_warn(dev, "Correlator not found on control_ras_rsp\n");
		return;
	}

	switch (crq->control_ras_rsp.op) {
	case IBMVNIC_TRACE_LEVEL:
		adapter->ras_comps[i].trace_level = crq->control_ras.level;
		break;
	case IBMVNIC_ERROR_LEVEL:
		adapter->ras_comps[i].error_check_level =
		    crq->control_ras.level;
		break;
	case IBMVNIC_TRACE_PAUSE:
		adapter->ras_comp_int[i].paused = 1;
		break;
	case IBMVNIC_TRACE_RESUME:
		adapter->ras_comp_int[i].paused = 0;
		break;
	case IBMVNIC_TRACE_ON:
		adapter->ras_comps[i].trace_on = 1;
		break;
	case IBMVNIC_TRACE_OFF:
		adapter->ras_comps[i].trace_on = 0;
		break;
	case IBMVNIC_CHG_TRACE_BUFF_SZ:
		/* trace_buff_sz is 3 bytes, stuff it into an int */
		((u8 *)(&adapter->ras_comps[i].trace_buff_size))[0] = 0;
		((u8 *)(&adapter->ras_comps[i].trace_buff_size))[1] =
		    crq->control_ras_rsp.trace_buff_sz[0];
		((u8 *)(&adapter->ras_comps[i].trace_buff_size))[2] =
		    crq->control_ras_rsp.trace_buff_sz[1];
		((u8 *)(&adapter->ras_comps[i].trace_buff_size))[3] =
		    crq->control_ras_rsp.trace_buff_sz[2];
		break;
	default:
		dev_err(dev, "invalid op %d on control_ras_rsp",
			crq->control_ras_rsp.op);
	}
}
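
/* debugfs files for the firmware RAS components. Each component directory
 * exposes its description, trace buffer, trace/error levels, and the
 * paused/tracing toggles; the open helper simply stashes the
 * per-component cookie for the read/write handlers below.
 */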
static int ibmvnic_fw_comp_open(struct inode *inode, struct file *file)
{
	file->private_data = inode->i_private;
	return 0;
}

static ssize_t trace_read(struct file *file, char __user *user_buf, size_t len,
			  loff_t *ppos)
{
	struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
	struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_fw_trace_entry *trace;
	int num = ras_comp_int->num;
	union ibmvnic_crq crq;
	dma_addr_t trace_tok;
	ssize_t ret;

	if (*ppos >= be32_to_cpu(adapter->ras_comps[num].trace_buff_size))
		return 0;

	trace =
	    dma_alloc_coherent(dev,
			       be32_to_cpu(adapter->ras_comps[num].
					   trace_buff_size), &trace_tok,
			       GFP_KERNEL);
	if (!trace) {
		dev_err(dev, "Couldn't alloc trace buffer\n");
		return 0;
	}

	memset(&crq, 0, sizeof(crq));
	crq.collect_fw_trace.first = IBMVNIC_CRQ_CMD;
	crq.collect_fw_trace.cmd = COLLECT_FW_TRACE;
	crq.collect_fw_trace.correlator = adapter->ras_comps[num].correlator;
	crq.collect_fw_trace.ioba = cpu_to_be32(trace_tok);
	crq.collect_fw_trace.len = adapter->ras_comps[num].trace_buff_size;
	ibmvnic_send_crq(adapter, &crq);

	init_completion(&adapter->fw_done);
	wait_for_completion(&adapter->fw_done);

	if (*ppos + len > be32_to_cpu(adapter->ras_comps[num].trace_buff_size))
		len = be32_to_cpu(adapter->ras_comps[num].trace_buff_size) -
		      *ppos;

	/* copy_to_user can fail; propagate -EFAULT instead of ignoring it */
	if (copy_to_user(user_buf, &((u8 *)trace)[*ppos], len)) {
		ret = -EFAULT;
	} else {
		*ppos += len;
		ret = len;
	}

	dma_free_coherent(dev,
			  be32_to_cpu(adapter->ras_comps[num].trace_buff_size),
			  trace, trace_tok);
	return ret;
}

static const struct file_operations trace_ops = {
	.owner		= THIS_MODULE,
	.open		= ibmvnic_fw_comp_open,
	.read		= trace_read,
};
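
/* The paused/tracing/error_level/trace_level files below share one shape:
 * reads format a single cached value, writes parse a small decimal buffer
 * and forward the new setting to the server as a CONTROL_RAS command.
 */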
static ssize_t paused_read(struct file *file, char __user *user_buf, size_t len,
			   loff_t *ppos)
{
	struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
	struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
	int num = ras_comp_int->num;
	char buff[5]; /* 1 or 0 plus \n and \0 */
	int size;

	size = sprintf(buff, "%d\n", adapter->ras_comp_int[num].paused);

	if (*ppos >= size)
		return 0;

	if (copy_to_user(user_buf, buff, size))
		return -EFAULT;

	*ppos += size;
	return size;
}

static ssize_t paused_write(struct file *file, const char __user *user_buf,
			    size_t len, loff_t *ppos)
{
	struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
	struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
	int num = ras_comp_int->num;
	union ibmvnic_crq crq;
	unsigned long val;
	char buff[9]; /* decimal max int plus \n and \0 */

	/* bound and NUL-terminate the copy so kstrtoul sees a valid string */
	if (len > sizeof(buff) - 1)
		len = sizeof(buff) - 1;
	if (copy_from_user(buff, user_buf, len))
		return -EFAULT;
	buff[len] = '\0';
	if (kstrtoul(buff, 10, &val))
		return -EINVAL;

	adapter->ras_comp_int[num].paused = val ? 1 : 0;

	memset(&crq, 0, sizeof(crq));
	crq.control_ras.first = IBMVNIC_CRQ_CMD;
	crq.control_ras.cmd = CONTROL_RAS;
	crq.control_ras.correlator = adapter->ras_comps[num].correlator;
	crq.control_ras.op = val ? IBMVNIC_TRACE_PAUSE : IBMVNIC_TRACE_RESUME;
	ibmvnic_send_crq(adapter, &crq);

	return len;
}

static const struct file_operations paused_ops = {
	.owner		= THIS_MODULE,
	.open		= ibmvnic_fw_comp_open,
	.read		= paused_read,
	.write		= paused_write,
};

static ssize_t tracing_read(struct file *file, char __user *user_buf,
			    size_t len, loff_t *ppos)
{
	struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
	struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
	int num = ras_comp_int->num;
	char buff[5]; /* 1 or 0 plus \n and \0 */
	int size;

	size = sprintf(buff, "%d\n", adapter->ras_comps[num].trace_on);

	if (*ppos >= size)
		return 0;

	if (copy_to_user(user_buf, buff, size))
		return -EFAULT;

	*ppos += size;
	return size;
}

static ssize_t tracing_write(struct file *file, const char __user *user_buf,
			     size_t len, loff_t *ppos)
{
	struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
	struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
	int num = ras_comp_int->num;
	union ibmvnic_crq crq;
	unsigned long val;
	char buff[9]; /* decimal max int plus \n and \0 */

	if (len > sizeof(buff) - 1)
		len = sizeof(buff) - 1;
	if (copy_from_user(buff, user_buf, len))
		return -EFAULT;
	buff[len] = '\0';
	if (kstrtoul(buff, 10, &val))
		return -EINVAL;

	memset(&crq, 0, sizeof(crq));
	crq.control_ras.first = IBMVNIC_CRQ_CMD;
	crq.control_ras.cmd = CONTROL_RAS;
	crq.control_ras.correlator = adapter->ras_comps[num].correlator;
	crq.control_ras.op = val ? IBMVNIC_TRACE_ON : IBMVNIC_TRACE_OFF;
	ibmvnic_send_crq(adapter, &crq);

	return len;
}

static const struct file_operations tracing_ops = {
	.owner		= THIS_MODULE,
	.open		= ibmvnic_fw_comp_open,
	.read		= tracing_read,
	.write		= tracing_write,
};

static ssize_t error_level_read(struct file *file, char __user *user_buf,
				size_t len, loff_t *ppos)
{
	struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
	struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
	int num = ras_comp_int->num;
	char buff[5]; /* decimal max char plus \n and \0 */
	int size;

	size = sprintf(buff, "%d\n", adapter->ras_comps[num].error_check_level);

	if (*ppos >= size)
		return 0;

	if (copy_to_user(user_buf, buff, size))
		return -EFAULT;

	*ppos += size;
	return size;
}

static ssize_t error_level_write(struct file *file, const char __user *user_buf,
				 size_t len, loff_t *ppos)
{
	struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
	struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
	int num = ras_comp_int->num;
	union ibmvnic_crq crq;
	unsigned long val;
	char buff[9]; /* decimal max int plus \n and \0 */

	if (len > sizeof(buff) - 1)
		len = sizeof(buff) - 1;
	if (copy_from_user(buff, user_buf, len))
		return -EFAULT;
	buff[len] = '\0';
	if (kstrtoul(buff, 10, &val))
		return -EINVAL;

	if (val > 9)
		val = 9;

	memset(&crq, 0, sizeof(crq));
	crq.control_ras.first = IBMVNIC_CRQ_CMD;
	crq.control_ras.cmd = CONTROL_RAS;
	crq.control_ras.correlator = adapter->ras_comps[num].correlator;
	crq.control_ras.op = IBMVNIC_ERROR_LEVEL;
	crq.control_ras.level = val;
	ibmvnic_send_crq(adapter, &crq);

	return len;
}

static const struct file_operations error_level_ops = {
	.owner		= THIS_MODULE,
	.open		= ibmvnic_fw_comp_open,
	.read		= error_level_read,
	.write		= error_level_write,
};

static ssize_t trace_level_read(struct file *file, char __user *user_buf,
				size_t len, loff_t *ppos)
{
	struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
	struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
	int num = ras_comp_int->num;
	char buff[5]; /* decimal max char plus \n and \0 */
	int size;

	size = sprintf(buff, "%d\n", adapter->ras_comps[num].trace_level);

	if (*ppos >= size)
		return 0;

	if (copy_to_user(user_buf, buff, size))
		return -EFAULT;

	*ppos += size;
	return size;
}

static ssize_t trace_level_write(struct file *file, const char __user *user_buf,
				 size_t len, loff_t *ppos)
{
	struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
	struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
	union ibmvnic_crq crq;
	unsigned long val;
	char buff[9]; /* decimal max int plus \n and \0 */

	if (len > sizeof(buff) - 1)
		len = sizeof(buff) - 1;
	if (copy_from_user(buff, user_buf, len))
		return -EFAULT;
	buff[len] = '\0';
	if (kstrtoul(buff, 10, &val))
		return -EINVAL;

	if (val > 9)
		val = 9;

	memset(&crq, 0, sizeof(crq));
	crq.control_ras.first = IBMVNIC_CRQ_CMD;
	crq.control_ras.cmd = CONTROL_RAS;
	crq.control_ras.correlator =
	    adapter->ras_comps[ras_comp_int->num].correlator;
	crq.control_ras.op = IBMVNIC_TRACE_LEVEL;
	crq.control_ras.level = val;
	ibmvnic_send_crq(adapter, &crq);

	return len;
}

static const struct file_operations trace_level_ops = {
	.owner		= THIS_MODULE,
	.open		= ibmvnic_fw_comp_open,
	.read		= trace_level_read,
	.write		= trace_level_write,
};

static ssize_t trace_buff_size_read(struct file *file, char __user *user_buf,
				    size_t len, loff_t *ppos)
{
	struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
	struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
	int num = ras_comp_int->num;
	char buff[9]; /* decimal max int plus \n and \0 */
	int size;

	size = sprintf(buff, "%d\n", adapter->ras_comps[num].trace_buff_size);

	if (*ppos >= size)
		return 0;

	if (copy_to_user(user_buf, buff, size))
		return -EFAULT;

	*ppos += size;
	return size;
}

static ssize_t trace_buff_size_write(struct file *file,
				     const char __user *user_buf, size_t len,
				     loff_t *ppos)
{
	struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
	struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
	union ibmvnic_crq crq;
	unsigned long val;
	char buff[9]; /* decimal max int plus \n and \0 */

	if (len > sizeof(buff) - 1)
		len = sizeof(buff) - 1;
	if (copy_from_user(buff, user_buf, len))
		return -EFAULT;
	buff[len] = '\0';
	if (kstrtoul(buff, 10, &val))
		return -EINVAL;

	memset(&crq, 0, sizeof(crq));
	crq.control_ras.first = IBMVNIC_CRQ_CMD;
	crq.control_ras.cmd = CONTROL_RAS;
	crq.control_ras.correlator =
	    adapter->ras_comps[ras_comp_int->num].correlator;
	crq.control_ras.op = IBMVNIC_CHG_TRACE_BUFF_SZ;
	/* trace_buff_sz is 3 bytes, stuff an int into it; taking bytes 5-7
	 * of the unsigned long picks the low-order bytes on the big-endian
	 * machines this driver runs on
	 */
	crq.control_ras.trace_buff_sz[0] = ((u8 *)(&val))[5];
	crq.control_ras.trace_buff_sz[1] = ((u8 *)(&val))[6];
	crq.control_ras.trace_buff_sz[2] = ((u8 *)(&val))[7];
	ibmvnic_send_crq(adapter, &crq);

	return len;
}

static const struct file_operations trace_size_ops = {
	.owner		= THIS_MODULE,
	.open		= ibmvnic_fw_comp_open,
	.read		= trace_buff_size_read,
	.write		= trace_buff_size_write,
};
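
/* Handle the response to a REQUEST_RAS_COMPS command: (re)build the
 * per-component debugfs tree under the adapter's debugfs directory using
 * the component table the server just DMA'd into adapter->ras_comps.
 */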
static void handle_request_ras_comps_rsp(union ibmvnic_crq *crq,
					 struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	struct dentry *dir_ent;
	struct dentry *ent;
	int i;

	debugfs_remove_recursive(adapter->ras_comps_ent);

	adapter->ras_comps_ent = debugfs_create_dir("ras_comps",
						    adapter->debugfs_dir);
	if (!adapter->ras_comps_ent || IS_ERR(adapter->ras_comps_ent)) {
		dev_info(dev, "debugfs create ras_comps dir failed\n");
		return;
	}

	for (i = 0; i < adapter->ras_comp_num; i++) {
		dir_ent = debugfs_create_dir(adapter->ras_comps[i].name,
					     adapter->ras_comps_ent);
		if (!dir_ent || IS_ERR(dir_ent)) {
			dev_info(dev, "debugfs create %s dir failed\n",
				 adapter->ras_comps[i].name);
			continue;
		}

		adapter->ras_comp_int[i].adapter = adapter;
		adapter->ras_comp_int[i].num = i;
		adapter->ras_comp_int[i].desc_blob.data =
		    &adapter->ras_comps[i].description;
		adapter->ras_comp_int[i].desc_blob.size =
		    sizeof(adapter->ras_comps[i].description);

		/* Don't need to remember the dentry's because the debugfs dir
		 * gets removed recursively
		 */
		ent = debugfs_create_blob("description", S_IRUGO, dir_ent,
					  &adapter->ras_comp_int[i].desc_blob);
		ent = debugfs_create_file("trace_buf_size", S_IRUGO | S_IWUSR,
					  dir_ent, &adapter->ras_comp_int[i],
					  &trace_size_ops);
		ent = debugfs_create_file("trace_level",
					  S_IRUGO |
					  (adapter->ras_comps[i].trace_level !=
					   0xFF ? S_IWUSR : 0),
					  dir_ent, &adapter->ras_comp_int[i],
					  &trace_level_ops);
		ent = debugfs_create_file("error_level",
					  S_IRUGO |
					  (adapter->
					   ras_comps[i].error_check_level !=
					   0xFF ? S_IWUSR : 0),
					  dir_ent, &adapter->ras_comp_int[i],
					  &error_level_ops);
		ent = debugfs_create_file("tracing", S_IRUGO | S_IWUSR,
					  dir_ent, &adapter->ras_comp_int[i],
					  &tracing_ops);
		ent = debugfs_create_file("paused", S_IRUGO | S_IWUSR,
					  dir_ent, &adapter->ras_comp_int[i],
					  &paused_ops);
		ent = debugfs_create_file("trace", S_IRUGO, dir_ent,
					  &adapter->ras_comp_int[i],
					  &trace_ops);
	}
}

static void handle_request_ras_comp_num_rsp(union ibmvnic_crq *crq,
					    struct ibmvnic_adapter *adapter)
{
	int len = adapter->ras_comp_num * sizeof(struct ibmvnic_fw_component);
	struct device *dev = &adapter->vdev->dev;
	union ibmvnic_crq newcrq;

	adapter->ras_comps = dma_alloc_coherent(dev, len,
						&adapter->ras_comps_tok,
						GFP_KERNEL);
	if (!adapter->ras_comps) {
		if (!firmware_has_feature(FW_FEATURE_CMO))
			dev_err(dev, "Couldn't alloc fw comps buffer\n");
		return;
	}

	adapter->ras_comp_int = kmalloc(adapter->ras_comp_num *
					sizeof(struct ibmvnic_fw_comp_internal),
					GFP_KERNEL);
	if (!adapter->ras_comp_int) {
		dma_free_coherent(dev, len, adapter->ras_comps,
				  adapter->ras_comps_tok);
		return;
	}

	memset(&newcrq, 0, sizeof(newcrq));
	newcrq.request_ras_comps.first = IBMVNIC_CRQ_CMD;
	newcrq.request_ras_comps.cmd = REQUEST_RAS_COMPS;
	newcrq.request_ras_comps.ioba = cpu_to_be32(adapter->ras_comps_tok);
	newcrq.request_ras_comps.len = cpu_to_be32(len);
	ibmvnic_send_crq(adapter, &newcrq);
}
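
/* Drop every command still waiting for a response. This runs when the CRQ
 * connection is lost (migration or failover), so any buffers mapped for
 * in-flight commands would otherwise leak.
 */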
static void ibmvnic_free_inflight(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_inflight_cmd *inflight_cmd, *next;
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_error_buff *error_buff, *tmp;
	unsigned long flags;
	unsigned long flags2;

	spin_lock_irqsave(&adapter->inflight_lock, flags);
	/* entries are freed as we walk, so use the _safe iterators */
	list_for_each_entry_safe(inflight_cmd, next, &adapter->inflight, list) {
		switch (inflight_cmd->crq.generic.cmd) {
		case LOGIN:
			dma_unmap_single(dev, adapter->login_buf_token,
					 adapter->login_buf_sz,
					 DMA_BIDIRECTIONAL);
			dma_unmap_single(dev, adapter->login_rsp_buf_token,
					 adapter->login_rsp_buf_sz,
					 DMA_BIDIRECTIONAL);
			kfree(adapter->login_rsp_buf);
			kfree(adapter->login_buf);
			break;
		case REQUEST_DUMP:
			complete(&adapter->fw_done);
			break;
		case REQUEST_ERROR_INFO:
			spin_lock_irqsave(&adapter->error_list_lock, flags2);
			list_for_each_entry_safe(error_buff, tmp,
						 &adapter->errors, list) {
				dma_unmap_single(dev, error_buff->dma,
						 error_buff->len,
						 DMA_FROM_DEVICE);
				kfree(error_buff->buff);
				list_del(&error_buff->list);
				kfree(error_buff);
			}
			spin_unlock_irqrestore(&adapter->error_list_lock,
					       flags2);
			break;
		}
		list_del(&inflight_cmd->list);
		kfree(inflight_cmd);
	}
	spin_unlock_irqrestore(&adapter->inflight_lock, flags);
}
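
/* Top-level CRQ dispatcher. Initialization and transport events are
 * handled inline; everything else is a command response that gets routed
 * to the matching handle_*_rsp helper above.
 */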
static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
			       struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_generic_crq *gen_crq = &crq->generic;
	struct net_device *netdev = adapter->netdev;
	struct device *dev = &adapter->vdev->dev;
	long rc;

	netdev_dbg(netdev, "Handling CRQ: %016lx %016lx\n",
		   ((unsigned long int *)crq)[0],
		   ((unsigned long int *)crq)[1]);
	switch (gen_crq->first) {
	case IBMVNIC_CRQ_INIT_RSP:
		switch (gen_crq->cmd) {
		case IBMVNIC_CRQ_INIT:
			dev_info(dev, "Partner initialized\n");
			/* Send back a response */
			rc = ibmvnic_send_crq_init_complete(adapter);
			if (rc == 0)
				send_version_xchg(adapter);
			else
				dev_err(dev, "Can't send initrsp rc=%ld\n", rc);
			break;
		case IBMVNIC_CRQ_INIT_COMPLETE:
			dev_info(dev, "Partner initialization complete\n");
			send_version_xchg(adapter);
			break;
		default:
			dev_err(dev, "Unknown crq cmd: %d\n", gen_crq->cmd);
		}
		return;
	case IBMVNIC_CRQ_XPORT_EVENT:
		if (gen_crq->cmd == IBMVNIC_PARTITION_MIGRATED) {
			dev_info(dev, "Re-enabling adapter\n");
			adapter->migrated = true;
			ibmvnic_free_inflight(adapter);
			release_sub_crqs(adapter);
			rc = ibmvnic_reenable_crq_queue(adapter);
			if (rc)
				dev_err(dev, "Error after enable rc=%ld\n", rc);
			adapter->migrated = false;
			rc = ibmvnic_send_crq_init(adapter);
			if (rc)
				dev_err(dev, "Error sending init rc=%ld\n", rc);
		} else {
			/* The adapter lost the connection */
			dev_err(dev, "Virtual Adapter failed (rc=%d)\n",
				gen_crq->cmd);
			ibmvnic_free_inflight(adapter);
			release_sub_crqs(adapter);
		}
		return;
	case IBMVNIC_CRQ_CMD_RSP:
		break;
	default:
		dev_err(dev, "Got an invalid msg type 0x%02x\n",
			gen_crq->first);
		return;
	}

	switch (gen_crq->cmd) {
	case VERSION_EXCHANGE_RSP:
		rc = crq->version_exchange_rsp.rc.code;
		if (rc) {
			dev_err(dev, "Error %ld in VERSION_EXCHG_RSP\n", rc);
			break;
		}
		dev_info(dev, "Partner protocol version is %d\n",
			 crq->version_exchange_rsp.version);
		if (be16_to_cpu(crq->version_exchange_rsp.version) <
		    ibmvnic_version)
			ibmvnic_version =
			    be16_to_cpu(crq->version_exchange_rsp.version);
		send_cap_queries(adapter);
		break;
	case QUERY_CAPABILITY_RSP:
		handle_query_cap_rsp(crq, adapter);
		break;
	case QUERY_MAP_RSP:
		handle_query_map_rsp(crq, adapter);
		break;
	case REQUEST_MAP_RSP:
		handle_request_map_rsp(crq, adapter);
		break;
	case REQUEST_UNMAP_RSP:
		handle_request_unmap_rsp(crq, adapter);
		break;
	case REQUEST_CAPABILITY_RSP:
		handle_request_cap_rsp(crq, adapter);
		break;
	case LOGIN_RSP:
		netdev_dbg(netdev, "Got Login Response\n");
		handle_login_rsp(crq, adapter);
		break;
	case LOGICAL_LINK_STATE_RSP:
		netdev_dbg(netdev, "Got Logical Link State Response\n");
		adapter->logical_link_state =
		    crq->logical_link_state_rsp.link_state;
		break;
	case LINK_STATE_INDICATION:
		netdev_dbg(netdev, "Got Logical Link State Indication\n");
		adapter->phys_link_state =
		    crq->link_state_indication.phys_link_state;
		adapter->logical_link_state =
		    crq->link_state_indication.logical_link_state;
		break;
	case CHANGE_MAC_ADDR_RSP:
		netdev_dbg(netdev, "Got MAC address change Response\n");
		handle_change_mac_rsp(crq, adapter);
		break;
	case ERROR_INDICATION:
		netdev_dbg(netdev, "Got Error Indication\n");
		handle_error_indication(crq, adapter);
		break;
	case REQUEST_ERROR_RSP:
		netdev_dbg(netdev, "Got Error Detail Response\n");
		handle_error_info_rsp(crq, adapter);
		break;
	case REQUEST_STATISTICS_RSP:
		netdev_dbg(netdev, "Got Statistics Response\n");
		complete(&adapter->stats_done);
		break;
	case REQUEST_DUMP_SIZE_RSP:
		netdev_dbg(netdev, "Got Request Dump Size Response\n");
		handle_dump_size_rsp(crq, adapter);
		break;
	case REQUEST_DUMP_RSP:
		netdev_dbg(netdev, "Got Request Dump Response\n");
		complete(&adapter->fw_done);
		break;
	case QUERY_IP_OFFLOAD_RSP:
		netdev_dbg(netdev, "Got Query IP offload Response\n");
		handle_query_ip_offload_rsp(adapter);
		break;
	case MULTICAST_CTRL_RSP:
		netdev_dbg(netdev, "Got multicast control Response\n");
		break;
	case CONTROL_IP_OFFLOAD_RSP:
		netdev_dbg(netdev, "Got Control IP offload Response\n");
		dma_unmap_single(dev, adapter->ip_offload_ctrl_tok,
				 sizeof(adapter->ip_offload_ctrl),
				 DMA_TO_DEVICE);
		/* We're done with the queries, perform the login */
		send_login(adapter);
		break;
	case REQUEST_RAS_COMP_NUM_RSP:
		netdev_dbg(netdev, "Got Request RAS Comp Num Response\n");
		if (crq->request_ras_comp_num_rsp.rc.code == 10) {
			netdev_dbg(netdev,
				   "Request RAS Comp Num not supported\n");
			break;
		}
		adapter->ras_comp_num =
		    be32_to_cpu(crq->request_ras_comp_num_rsp.num_components);
		handle_request_ras_comp_num_rsp(crq, adapter);
		break;
	case REQUEST_RAS_COMPS_RSP:
		netdev_dbg(netdev, "Got Request RAS Comps Response\n");
		handle_request_ras_comps_rsp(crq, adapter);
		break;
	case CONTROL_RAS_RSP:
		netdev_dbg(netdev, "Got Control RAS Response\n");
		handle_control_ras_rsp(crq, adapter);
		break;
	case COLLECT_FW_TRACE_RSP:
		netdev_dbg(netdev, "Got Collect firmware trace Response\n");
		complete(&adapter->fw_done);
		break;
	default:
		netdev_err(netdev, "Got an invalid cmd type 0x%02x\n",
			   gen_crq->cmd);
	}
}
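
/* CRQ interrupt handler. Interrupts stay disabled while the queue is
 * drained; after re-enabling them we poll one more time to close the race
 * where a message lands between the last dequeue and the re-enable.
 */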
static irqreturn_t ibmvnic_interrupt(int irq, void *instance)
{
	struct ibmvnic_adapter *adapter = instance;
	struct ibmvnic_crq_queue *queue = &adapter->crq;
	struct vio_dev *vdev = adapter->vdev;
	union ibmvnic_crq *crq;
	unsigned long flags;
	bool done = false;

	spin_lock_irqsave(&queue->lock, flags);
	vio_disable_interrupts(vdev);
	while (!done) {
		/* Pull all the valid messages off the CRQ */
		while ((crq = ibmvnic_next_crq(adapter)) != NULL) {
			ibmvnic_handle_crq(crq, adapter);
			crq->generic.first = 0;
		}
		vio_enable_interrupts(vdev);
		crq = ibmvnic_next_crq(adapter);
		if (crq) {
			vio_disable_interrupts(vdev);
			ibmvnic_handle_crq(crq, adapter);
			crq->generic.first = 0;
		} else {
			done = true;
		}
	}
	spin_unlock_irqrestore(&queue->lock, flags);
	return IRQ_HANDLED;
}
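
/* Ask the hypervisor to re-enable our CRQ after a partition migration,
 * retrying while the hypervisor reports busy.
 */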
static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *adapter)
{
	struct vio_dev *vdev = adapter->vdev;
	int rc;

	do {
		rc = plpar_hcall_norets(H_ENABLE_CRQ, vdev->unit_address);
	} while (rc == H_IN_PROGRESS || rc == H_BUSY || H_IS_LONG_BUSY(rc));

	if (rc)
		dev_err(&vdev->dev, "Error enabling adapter (rc=%d)\n", rc);

	return rc;
}

static int ibmvnic_reset_crq(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_crq_queue *crq = &adapter->crq;
	struct device *dev = &adapter->vdev->dev;
	struct vio_dev *vdev = adapter->vdev;
	int rc;

	/* Close the CRQ */
	do {
		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));

	/* Clean out the queue */
	memset(crq->msgs, 0, PAGE_SIZE);
	crq->cur = 0;

	/* And re-open it again */
	rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
				crq->msg_token, PAGE_SIZE);

	if (rc == H_CLOSED)
		/* Adapter is good, but other end is not ready */
		dev_warn(dev, "Partner adapter not ready\n");
	else if (rc != 0)
		dev_warn(dev, "Couldn't register crq (rc=%d)\n", rc);

	return rc;
}

static void ibmvnic_release_crq_queue(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_crq_queue *crq = &adapter->crq;
	struct vio_dev *vdev = adapter->vdev;
	long rc;

	netdev_dbg(adapter->netdev, "Releasing CRQ\n");
	free_irq(vdev->irq, adapter);
	do {
		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));

	dma_unmap_single(&vdev->dev, crq->msg_token, PAGE_SIZE,
			 DMA_BIDIRECTIONAL);
	free_page((unsigned long)crq->msgs);
}

static int ibmvnic_init_crq_queue(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_crq_queue *crq = &adapter->crq;
	struct device *dev = &adapter->vdev->dev;
	struct vio_dev *vdev = adapter->vdev;
	int rc, retrc = -ENOMEM;

	crq->msgs = (union ibmvnic_crq *)get_zeroed_page(GFP_KERNEL);
	/* Should we allocate more than one page? */
	if (!crq->msgs)
		return -ENOMEM;

	crq->size = PAGE_SIZE / sizeof(*crq->msgs);
	crq->msg_token = dma_map_single(dev, crq->msgs, PAGE_SIZE,
					DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, crq->msg_token))
		goto map_failed;

	rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
				crq->msg_token, PAGE_SIZE);

	if (rc == H_RESOURCE)
		/* maybe kexecing and resource is busy. try a reset */
		rc = ibmvnic_reset_crq(adapter);
	retrc = rc;

	if (rc == H_CLOSED) {
		dev_warn(dev, "Partner adapter not ready\n");
	} else if (rc) {
		dev_warn(dev, "Error %d opening adapter\n", rc);
		goto reg_crq_failed;
	}

	retrc = 0;

	netdev_dbg(adapter->netdev, "registering irq 0x%x\n", vdev->irq);
	rc = request_irq(vdev->irq, ibmvnic_interrupt, 0, IBMVNIC_NAME,
			 adapter);
	if (rc) {
		dev_err(dev, "Couldn't register irq 0x%x. rc=%d\n",
			vdev->irq, rc);
		goto req_irq_failed;
	}

	rc = vio_enable_interrupts(vdev);
	if (rc) {
		dev_err(dev, "Error %d enabling interrupts\n", rc);
		goto req_irq_failed;
	}

	crq->cur = 0;
	spin_lock_init(&crq->lock);

	return retrc;

req_irq_failed:
	do {
		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
reg_crq_failed:
	dma_unmap_single(dev, crq->msg_token, PAGE_SIZE, DMA_BIDIRECTIONAL);
map_failed:
	free_page((unsigned long)crq->msgs);
	return retrc;
}
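
/* debugfs for dump: reading the "dump" file drives a three-step exchange.
 * We send REQUEST_DUMP_SIZE here, handle_dump_size_rsp() allocates the
 * buffer and issues REQUEST_DUMP, and the REQUEST_DUMP_RSP handler
 * completes fw_done so the collected data can be written out.
 */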
static int ibmvnic_dump_show(struct seq_file *seq, void *v)
{
	struct net_device *netdev = seq->private;
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->vdev->dev;
	union ibmvnic_crq crq;

	memset(&crq, 0, sizeof(crq));
	crq.request_dump_size.first = IBMVNIC_CRQ_CMD;
	crq.request_dump_size.cmd = REQUEST_DUMP_SIZE;
	ibmvnic_send_crq(adapter, &crq);

	init_completion(&adapter->fw_done);
	wait_for_completion(&adapter->fw_done);

	seq_write(seq, adapter->dump_data, adapter->dump_data_size);

	dma_unmap_single(dev, adapter->dump_data_token, adapter->dump_data_size,
			 DMA_BIDIRECTIONAL);

	kfree(adapter->dump_data);

	return 0;
}

static int ibmvnic_dump_open(struct inode *inode, struct file *file)
{
	return single_open(file, ibmvnic_dump_show, inode->i_private);
}

static const struct file_operations ibmvnic_dump_ops = {
	.owner		= THIS_MODULE,
	.open		= ibmvnic_dump_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
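
/* Probe sequence: read the MAC address from the device tree, set up the
 * net_device and CRQ, then drive the CRQ handshake (version exchange,
 * capability queries, login) to completion before registering the netdev.
 */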
static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
{
	struct ibmvnic_adapter *adapter;
	struct net_device *netdev;
	unsigned char *mac_addr_p;
	struct dentry *ent;
	char buf[16]; /* debugfs name buf */
	int rc;

	dev_dbg(&dev->dev, "entering ibmvnic_probe for UA 0x%x\n",
		dev->unit_address);

	mac_addr_p = (unsigned char *)vio_get_attribute(dev,
							VETH_MAC_ADDR, NULL);
	if (!mac_addr_p) {
		dev_err(&dev->dev,
			"(%s:%3.3d) ERROR: Can't find MAC_ADDR attribute\n",
			__FILE__, __LINE__);
		return -EINVAL;
	}

	netdev = alloc_etherdev_mq(sizeof(struct ibmvnic_adapter),
				   IBMVNIC_MAX_TX_QUEUES);
	if (!netdev)
		return -ENOMEM;

	adapter = netdev_priv(netdev);
	dev_set_drvdata(&dev->dev, netdev);
	adapter->vdev = dev;
	adapter->netdev = netdev;

	ether_addr_copy(adapter->mac_addr, mac_addr_p);
	ether_addr_copy(netdev->dev_addr, adapter->mac_addr);
	netdev->irq = dev->irq;
	netdev->netdev_ops = &ibmvnic_netdev_ops;
	netdev->ethtool_ops = &ibmvnic_ethtool_ops;
	SET_NETDEV_DEV(netdev, &dev->dev);

	spin_lock_init(&adapter->stats_lock);

	rc = ibmvnic_init_crq_queue(adapter);
	if (rc) {
		dev_err(&dev->dev, "Couldn't initialize crq. rc=%d\n", rc);
		goto free_netdev;
	}

	INIT_LIST_HEAD(&adapter->errors);
	INIT_LIST_HEAD(&adapter->inflight);
	spin_lock_init(&adapter->error_list_lock);
	spin_lock_init(&adapter->inflight_lock);

	adapter->stats_token = dma_map_single(&dev->dev, &adapter->stats,
					      sizeof(struct ibmvnic_statistics),
					      DMA_FROM_DEVICE);
	if (dma_mapping_error(&dev->dev, adapter->stats_token)) {
		if (!firmware_has_feature(FW_FEATURE_CMO))
			dev_err(&dev->dev, "Couldn't map stats buffer\n");
		rc = -ENOMEM;
		goto free_crq;
	}

	snprintf(buf, sizeof(buf), "ibmvnic_%x", dev->unit_address);
	ent = debugfs_create_dir(buf, NULL);
	if (!ent || IS_ERR(ent)) {
		dev_info(&dev->dev, "debugfs create directory failed\n");
		adapter->debugfs_dir = NULL;
	} else {
		adapter->debugfs_dir = ent;
		ent = debugfs_create_file("dump", S_IRUGO, adapter->debugfs_dir,
					  netdev, &ibmvnic_dump_ops);
		if (!ent || IS_ERR(ent)) {
			dev_info(&dev->dev,
				 "debugfs create dump file failed\n");
			adapter->debugfs_dump = NULL;
		} else {
			adapter->debugfs_dump = ent;
		}
	}
	ibmvnic_send_crq_init(adapter);

	init_completion(&adapter->init_done);
	wait_for_completion(&adapter->init_done);

	/* needed to pull init_sub_crqs outside of an interrupt context
	 * because it creates IRQ mappings for the subCRQ queues, causing
	 * a kernel warning
	 */
	init_sub_crqs(adapter, 0);

	reinit_completion(&adapter->init_done);
	wait_for_completion(&adapter->init_done);

	/* if init_sub_crqs is partially successful, retry */
	while (!adapter->tx_scrq || !adapter->rx_scrq) {
		init_sub_crqs(adapter, 1);

		reinit_completion(&adapter->init_done);
		wait_for_completion(&adapter->init_done);
	}

	netdev->real_num_tx_queues = adapter->req_tx_queues;

	rc = register_netdev(netdev);
	if (rc) {
		dev_err(&dev->dev, "failed to register netdev rc=%d\n", rc);
		goto free_debugfs;
	}
	dev_info(&dev->dev, "ibmvnic registered\n");

	return 0;

free_debugfs:
	if (adapter->debugfs_dir && !IS_ERR(adapter->debugfs_dir))
		debugfs_remove_recursive(adapter->debugfs_dir);
free_crq:
	ibmvnic_release_crq_queue(adapter);
free_netdev:
	free_netdev(netdev);
	return rc;
}

static int ibmvnic_remove(struct vio_dev *dev)
{
	struct net_device *netdev = dev_get_drvdata(&dev->dev);
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	unregister_netdev(netdev);

	release_sub_crqs(adapter);

	ibmvnic_release_crq_queue(adapter);

	if (adapter->debugfs_dir && !IS_ERR(adapter->debugfs_dir))
		debugfs_remove_recursive(adapter->debugfs_dir);

	if (adapter->ras_comps)
		dma_free_coherent(&dev->dev,
				  adapter->ras_comp_num *
				  sizeof(struct ibmvnic_fw_component),
				  adapter->ras_comps, adapter->ras_comps_tok);

	kfree(adapter->ras_comp_int);

	free_netdev(netdev);
	dev_set_drvdata(&dev->dev, NULL);

	return 0;
}
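
/* Estimate how much IOMMU/DMA space the device wants, for the pseries
 * Cooperative Memory Overcommit code: the CRQ page, the bounce buffer,
 * the statistics buffer, the sub-CRQ queues, and the long term rx buffers.
 */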
static unsigned long ibmvnic_get_desired_dma(struct vio_dev *vdev)
{
	struct net_device *netdev = dev_get_drvdata(&vdev->dev);
	struct ibmvnic_adapter *adapter;
	struct iommu_table *tbl;
	unsigned long ret = 0;
	int i;

	tbl = get_iommu_table_base(&vdev->dev);

	/* netdev inits at probe time along with the structures we need below*/
	if (!netdev)
		return IOMMU_PAGE_ALIGN(IBMVNIC_IO_ENTITLEMENT_DEFAULT, tbl);

	adapter = netdev_priv(netdev);

	ret += PAGE_SIZE; /* the crq message queue */
	ret += adapter->bounce_buffer_size;
	ret += IOMMU_PAGE_ALIGN(sizeof(struct ibmvnic_statistics), tbl);

	for (i = 0; i < adapter->req_tx_queues + adapter->req_rx_queues; i++)
		ret += 4 * PAGE_SIZE; /* the scrq message queue */

	for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
	     i++)
		ret += adapter->rx_pool[i].size *
		    IOMMU_PAGE_ALIGN(adapter->rx_pool[i].buff_size, tbl);

	return ret;
}

static int ibmvnic_resume(struct device *dev)
{
	struct net_device *netdev = dev_get_drvdata(dev);
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int i;

	/* kick the interrupt handlers just in case we lost an interrupt */
	for (i = 0; i < adapter->req_rx_queues; i++)
		ibmvnic_interrupt_rx(adapter->rx_scrq[i]->irq,
				     adapter->rx_scrq[i]);

	return 0;
}

static struct vio_device_id ibmvnic_device_table[] = {
	{"network", "IBM,vnic"},
	{"", "" }
};
MODULE_DEVICE_TABLE(vio, ibmvnic_device_table);

static const struct dev_pm_ops ibmvnic_pm_ops = {
	.resume = ibmvnic_resume
};

static struct vio_driver ibmvnic_driver = {
	.id_table       = ibmvnic_device_table,
	.probe          = ibmvnic_probe,
	.remove         = ibmvnic_remove,
	.get_desired_dma = ibmvnic_get_desired_dma,
	.name		= ibmvnic_driver_name,
	.pm		= &ibmvnic_pm_ops,
};

/* module functions */
static int __init ibmvnic_module_init(void)
{
	pr_info("%s: %s %s\n", ibmvnic_driver_name, ibmvnic_driver_string,
		IBMVNIC_DRIVER_VERSION);

	return vio_register_driver(&ibmvnic_driver);
}

static void __exit ibmvnic_module_exit(void)
{
	vio_unregister_driver(&ibmvnic_driver);
}

module_init(ibmvnic_module_init);
module_exit(ibmvnic_module_exit);