/*
 * IBM Power Virtual Ethernet Device Driver
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 *
 * Copyright (C) IBM Corporation, 2003, 2010
 *
 * Authors: Dave Larson <larson1@us.ibm.com>
 *	    Santiago Leon <santil@linux.vnet.ibm.com>
 *	    Brian King <brking@linux.vnet.ibm.com>
 *	    Robert Jennings <rcj@linux.vnet.ibm.com>
 *	    Anton Blanchard <anton@au.ibm.com>
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/mm.h>
#include <linux/pm.h>
#include <linux/ethtool.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/slab.h>
#include <asm/hvcall.h>
#include <linux/atomic.h>
#include <asm/vio.h>
#include <asm/iommu.h>
#include <asm/firmware.h>

#include "ibmveth.h"

static irqreturn_t ibmveth_interrupt(int irq, void *dev_instance);
static void ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter);
static unsigned long ibmveth_get_desired_dma(struct vio_dev *vdev);

static struct kobj_type ktype_veth_pool;

static const char ibmveth_driver_name[] = "ibmveth";
static const char ibmveth_driver_string[] = "IBM Power Virtual Ethernet Driver";
#define ibmveth_driver_version "1.05"

MODULE_AUTHOR("Santiago Leon <santil@linux.vnet.ibm.com>");
MODULE_DESCRIPTION("IBM Power Virtual Ethernet Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(ibmveth_driver_version);

static unsigned int tx_copybreak __read_mostly = 128;
module_param(tx_copybreak, uint, 0644);
MODULE_PARM_DESC(tx_copybreak,
	"Maximum size of packet that is copied to a new buffer on transmit");

static unsigned int rx_copybreak __read_mostly = 128;
module_param(rx_copybreak, uint, 0644);
MODULE_PARM_DESC(rx_copybreak,
	"Maximum size of packet that is copied to a new buffer on receive");

static unsigned int rx_flush __read_mostly = 0;
module_param(rx_flush, uint, 0644);
MODULE_PARM_DESC(rx_flush, "Flush receive buffers before use");
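
/*
 * All three parameters are declared with mode 0644, so they can also be
 * adjusted at runtime through /sys/module/ibmveth/parameters/.
 */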

struct ibmveth_stat {
	char name[ETH_GSTRING_LEN];
	int offset;
};

#define IBMVETH_STAT_OFF(stat) offsetof(struct ibmveth_adapter, stat)
#define IBMVETH_GET_STAT(a, off) *((u64 *)(((unsigned long)(a)) + off))
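
/*
 * Each table entry pairs an ethtool statistic name with the byte offset of
 * the matching u64 counter inside struct ibmveth_adapter;
 * IBMVETH_GET_STAT() later reads a counter back through that offset.
 */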

struct ibmveth_stat ibmveth_stats[] = {
	{ "replenish_task_cycles", IBMVETH_STAT_OFF(replenish_task_cycles) },
	{ "replenish_no_mem", IBMVETH_STAT_OFF(replenish_no_mem) },
	{ "replenish_add_buff_failure",
			IBMVETH_STAT_OFF(replenish_add_buff_failure) },
	{ "replenish_add_buff_success",
			IBMVETH_STAT_OFF(replenish_add_buff_success) },
	{ "rx_invalid_buffer", IBMVETH_STAT_OFF(rx_invalid_buffer) },
	{ "rx_no_buffer", IBMVETH_STAT_OFF(rx_no_buffer) },
	{ "tx_map_failed", IBMVETH_STAT_OFF(tx_map_failed) },
	{ "tx_send_failed", IBMVETH_STAT_OFF(tx_send_failed) },
	{ "fw_enabled_ipv4_csum", IBMVETH_STAT_OFF(fw_ipv4_csum_support) },
	{ "fw_enabled_ipv6_csum", IBMVETH_STAT_OFF(fw_ipv6_csum_support) },
	{ "tx_large_packets", IBMVETH_STAT_OFF(tx_large_packets) },
};

/* simple methods of getting data from the current rxq entry */
static inline u32 ibmveth_rxq_flags(struct ibmveth_adapter *adapter)
{
	return be32_to_cpu(adapter->rx_queue.queue_addr[adapter->rx_queue.index].flags_off);
}

static inline int ibmveth_rxq_toggle(struct ibmveth_adapter *adapter)
{
	return (ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_TOGGLE) >>
			IBMVETH_RXQ_TOGGLE_SHIFT;
}

static inline int ibmveth_rxq_pending_buffer(struct ibmveth_adapter *adapter)
{
	return ibmveth_rxq_toggle(adapter) == adapter->rx_queue.toggle;
}

static inline int ibmveth_rxq_buffer_valid(struct ibmveth_adapter *adapter)
{
	return ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_VALID;
}

static inline int ibmveth_rxq_frame_offset(struct ibmveth_adapter *adapter)
{
	return ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_OFF_MASK;
}

static inline int ibmveth_rxq_frame_length(struct ibmveth_adapter *adapter)
{
	return be32_to_cpu(adapter->rx_queue.queue_addr[adapter->rx_queue.index].length);
}

static inline int ibmveth_rxq_csum_good(struct ibmveth_adapter *adapter)
{
	return ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_CSUM_GOOD;
}
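
/*
 * Ownership of a receive queue entry is tracked with a toggle bit: an entry
 * is pending for the driver when its toggle matches
 * adapter->rx_queue.toggle, which is flipped each time the ring index wraps
 * (see ibmveth_rxq_recycle_buffer() and ibmveth_rxq_harvest_buffer()).
 */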

/* setup the initial settings for a buffer pool */
static void ibmveth_init_buffer_pool(struct ibmveth_buff_pool *pool,
				     u32 pool_index, u32 pool_size,
				     u32 buff_size, u32 pool_active)
{
	pool->size = pool_size;
	pool->index = pool_index;
	pool->buff_size = buff_size;
	pool->threshold = pool_size * 7 / 8;
	pool->active = pool_active;
}

/* allocate and setup a buffer pool - called during open */
static int ibmveth_alloc_buffer_pool(struct ibmveth_buff_pool *pool)
{
	int i;

	pool->free_map = kmalloc(sizeof(u16) * pool->size, GFP_KERNEL);

	if (!pool->free_map)
		return -1;

	pool->dma_addr = kmalloc(sizeof(dma_addr_t) * pool->size, GFP_KERNEL);
	if (!pool->dma_addr) {
		kfree(pool->free_map);
		pool->free_map = NULL;
		return -1;
	}

	pool->skbuff = kcalloc(pool->size, sizeof(void *), GFP_KERNEL);

	if (!pool->skbuff) {
		kfree(pool->dma_addr);
		pool->dma_addr = NULL;

		kfree(pool->free_map);
		pool->free_map = NULL;
		return -1;
	}

	memset(pool->dma_addr, 0, sizeof(dma_addr_t) * pool->size);

	for (i = 0; i < pool->size; ++i)
		pool->free_map[i] = i;

	atomic_set(&pool->available, 0);
	pool->producer_index = 0;
	pool->consumer_index = 0;

	return 0;
}
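
/*
 * Flush the cache lines covering a receive buffer (dcbfl is the PowerPC
 * data-cache-block-flush instruction); only used when the rx_flush module
 * parameter is set.
 */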
static inline void ibmveth_flush_buffer(void *addr, unsigned long length)
{
	unsigned long offset;

	for (offset = 0; offset < length; offset += SMP_CACHE_BYTES)
		asm("dcbfl %0,%1" :: "b" (addr), "r" (offset));
}

/* replenish the buffers for a pool. note that we don't need to
 * skb_reserve these since they are used for incoming...
 */
static void ibmveth_replenish_buffer_pool(struct ibmveth_adapter *adapter,
					  struct ibmveth_buff_pool *pool)
{
	int i;
	u32 count = pool->size - atomic_read(&pool->available);
	u32 buffers_added = 0;
	struct sk_buff *skb;
	unsigned int free_index, index;
	u64 correlator;
	unsigned long lpar_rc;
	dma_addr_t dma_addr;

	mb();

	for (i = 0; i < count; ++i) {
		union ibmveth_buf_desc desc;

		skb = netdev_alloc_skb(adapter->netdev, pool->buff_size);

		if (!skb) {
			netdev_dbg(adapter->netdev,
				   "replenish: unable to allocate skb\n");
			adapter->replenish_no_mem++;
			break;
		}

		free_index = pool->consumer_index;
		pool->consumer_index++;
		if (pool->consumer_index >= pool->size)
			pool->consumer_index = 0;
		index = pool->free_map[free_index];

		BUG_ON(index == IBM_VETH_INVALID_MAP);
		BUG_ON(pool->skbuff[index] != NULL);

		dma_addr = dma_map_single(&adapter->vdev->dev, skb->data,
				pool->buff_size, DMA_FROM_DEVICE);

		if (dma_mapping_error(&adapter->vdev->dev, dma_addr))
			goto failure;

		pool->free_map[free_index] = IBM_VETH_INVALID_MAP;
		pool->dma_addr[index] = dma_addr;
		pool->skbuff[index] = skb;

		/* the correlator identifies the pool and slot on completion */
		correlator = ((u64)pool->index << 32) | index;
		*(u64 *)skb->data = correlator;

		desc.fields.flags_len = IBMVETH_BUF_VALID | pool->buff_size;
		desc.fields.address = dma_addr;

		if (rx_flush) {
			unsigned int len = min(pool->buff_size,
					       adapter->netdev->mtu +
					       IBMVETH_BUFF_OH);
			ibmveth_flush_buffer(skb->data, len);
		}
		lpar_rc = h_add_logical_lan_buffer(adapter->vdev->unit_address,
						   desc.desc);

		if (lpar_rc != H_SUCCESS) {
			goto failure;
		} else {
			buffers_added++;
			adapter->replenish_add_buff_success++;
		}
	}

	mb();
	atomic_add(buffers_added, &(pool->available));
	return;

failure:
	pool->free_map[free_index] = index;
	pool->skbuff[index] = NULL;
	if (pool->consumer_index == 0)
		pool->consumer_index = pool->size - 1;
	else
		pool->consumer_index--;
	if (!dma_mapping_error(&adapter->vdev->dev, dma_addr))
		dma_unmap_single(&adapter->vdev->dev,
				 pool->dma_addr[index], pool->buff_size,
				 DMA_FROM_DEVICE);
	dev_kfree_skb_any(skb);
	adapter->replenish_add_buff_failure++;

	mb();
	atomic_add(buffers_added, &(pool->available));
}
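
/*
 * free_map acts as a ring of free slot indices: the replenish path above
 * consumes entries, and ibmveth_remove_buffer_from_pool() below produces
 * them again once a buffer has been unmapped and handed up the stack.
 */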

/*
 * The final 8 bytes of the buffer list is a counter of frames dropped
 * because there was not a buffer in the buffer list capable of holding
 * the frame.
 */
static void ibmveth_update_rx_no_buffer(struct ibmveth_adapter *adapter)
{
	__be64 *p = adapter->buffer_list_addr + 4096 - 8;

	adapter->rx_no_buffer = be64_to_cpup(p);
}

/* replenish routine */
static void ibmveth_replenish_task(struct ibmveth_adapter *adapter)
{
	int i;

	adapter->replenish_task_cycles++;

	for (i = (IBMVETH_NUM_BUFF_POOLS - 1); i >= 0; i--) {
		struct ibmveth_buff_pool *pool = &adapter->rx_buff_pool[i];

		if (pool->active &&
		    (atomic_read(&pool->available) < pool->threshold))
			ibmveth_replenish_buffer_pool(adapter, pool);
	}

	ibmveth_update_rx_no_buffer(adapter);
}

/* empty and free a buffer pool - also used to do cleanup in error paths */
static void ibmveth_free_buffer_pool(struct ibmveth_adapter *adapter,
				     struct ibmveth_buff_pool *pool)
{
	int i;

	kfree(pool->free_map);
	pool->free_map = NULL;

	if (pool->skbuff && pool->dma_addr) {
		for (i = 0; i < pool->size; ++i) {
			struct sk_buff *skb = pool->skbuff[i];
			if (skb) {
				dma_unmap_single(&adapter->vdev->dev,
						 pool->dma_addr[i],
						 pool->buff_size,
						 DMA_FROM_DEVICE);
				dev_kfree_skb_any(skb);
				pool->skbuff[i] = NULL;
			}
		}
	}

	if (pool->dma_addr) {
		kfree(pool->dma_addr);
		pool->dma_addr = NULL;
	}

	if (pool->skbuff) {
		kfree(pool->skbuff);
		pool->skbuff = NULL;
	}
}

/* remove a buffer from a pool */
static void ibmveth_remove_buffer_from_pool(struct ibmveth_adapter *adapter,
					    u64 correlator)
{
	unsigned int pool  = correlator >> 32;
	unsigned int index = correlator & 0xffffffffUL;
	unsigned int free_index;
	struct sk_buff *skb;

	BUG_ON(pool >= IBMVETH_NUM_BUFF_POOLS);
	BUG_ON(index >= adapter->rx_buff_pool[pool].size);

	skb = adapter->rx_buff_pool[pool].skbuff[index];

	BUG_ON(skb == NULL);

	adapter->rx_buff_pool[pool].skbuff[index] = NULL;

	dma_unmap_single(&adapter->vdev->dev,
			 adapter->rx_buff_pool[pool].dma_addr[index],
			 adapter->rx_buff_pool[pool].buff_size,
			 DMA_FROM_DEVICE);

	free_index = adapter->rx_buff_pool[pool].producer_index;
	adapter->rx_buff_pool[pool].producer_index++;
	if (adapter->rx_buff_pool[pool].producer_index >=
	    adapter->rx_buff_pool[pool].size)
		adapter->rx_buff_pool[pool].producer_index = 0;
	adapter->rx_buff_pool[pool].free_map[free_index] = index;

	mb();

	atomic_dec(&(adapter->rx_buff_pool[pool].available));
}

/* get the current buffer on the rx queue */
static inline struct sk_buff *ibmveth_rxq_get_buffer(struct ibmveth_adapter *adapter)
{
	u64 correlator = adapter->rx_queue.queue_addr[adapter->rx_queue.index].correlator;
	unsigned int pool = correlator >> 32;
	unsigned int index = correlator & 0xffffffffUL;

	BUG_ON(pool >= IBMVETH_NUM_BUFF_POOLS);
	BUG_ON(index >= adapter->rx_buff_pool[pool].size);

	return adapter->rx_buff_pool[pool].skbuff[index];
}

/* recycle the current buffer on the rx queue */
static int ibmveth_rxq_recycle_buffer(struct ibmveth_adapter *adapter)
{
	u32 q_index = adapter->rx_queue.index;
	u64 correlator = adapter->rx_queue.queue_addr[q_index].correlator;
	unsigned int pool = correlator >> 32;
	unsigned int index = correlator & 0xffffffffUL;
	union ibmveth_buf_desc desc;
	unsigned long lpar_rc;
	int ret = 1;

	BUG_ON(pool >= IBMVETH_NUM_BUFF_POOLS);
	BUG_ON(index >= adapter->rx_buff_pool[pool].size);

	if (!adapter->rx_buff_pool[pool].active) {
		ibmveth_rxq_harvest_buffer(adapter);
		ibmveth_free_buffer_pool(adapter, &adapter->rx_buff_pool[pool]);
		goto out;
	}

	desc.fields.flags_len = IBMVETH_BUF_VALID |
		adapter->rx_buff_pool[pool].buff_size;
	desc.fields.address = adapter->rx_buff_pool[pool].dma_addr[index];

	lpar_rc = h_add_logical_lan_buffer(adapter->vdev->unit_address, desc.desc);

	if (lpar_rc != H_SUCCESS) {
		netdev_dbg(adapter->netdev, "h_add_logical_lan_buffer failed "
			   "during recycle rc=%ld", lpar_rc);
		ibmveth_remove_buffer_from_pool(adapter, adapter->rx_queue.queue_addr[adapter->rx_queue.index].correlator);
		ret = 0;
	}

	if (++adapter->rx_queue.index == adapter->rx_queue.num_slots) {
		adapter->rx_queue.index = 0;
		adapter->rx_queue.toggle = !adapter->rx_queue.toggle;
	}

out:
	return ret;
}

static void ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter)
{
	ibmveth_remove_buffer_from_pool(adapter, adapter->rx_queue.queue_addr[adapter->rx_queue.index].correlator);

	if (++adapter->rx_queue.index == adapter->rx_queue.num_slots) {
		adapter->rx_queue.index = 0;
		adapter->rx_queue.toggle = !adapter->rx_queue.toggle;
	}
}
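
/*
 * Release everything ibmveth_open() set up.  Each resource is checked
 * before it is unmapped or freed, so this is also safe to call on a
 * partially initialized adapter from the error paths.
 */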
static void ibmveth_cleanup(struct ibmveth_adapter *adapter)
{
	int i;
	struct device *dev = &adapter->vdev->dev;

	if (adapter->buffer_list_addr != NULL) {
		if (!dma_mapping_error(dev, adapter->buffer_list_dma)) {
			dma_unmap_single(dev, adapter->buffer_list_dma, 4096,
					 DMA_BIDIRECTIONAL);
			adapter->buffer_list_dma = DMA_ERROR_CODE;
		}
		free_page((unsigned long)adapter->buffer_list_addr);
		adapter->buffer_list_addr = NULL;
	}

	if (adapter->filter_list_addr != NULL) {
		if (!dma_mapping_error(dev, adapter->filter_list_dma)) {
			dma_unmap_single(dev, adapter->filter_list_dma, 4096,
					 DMA_BIDIRECTIONAL);
			adapter->filter_list_dma = DMA_ERROR_CODE;
		}
		free_page((unsigned long)adapter->filter_list_addr);
		adapter->filter_list_addr = NULL;
	}

	if (adapter->rx_queue.queue_addr != NULL) {
		dma_free_coherent(dev, adapter->rx_queue.queue_len,
				  adapter->rx_queue.queue_addr,
				  adapter->rx_queue.queue_dma);
		adapter->rx_queue.queue_addr = NULL;
	}

	for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++)
		if (adapter->rx_buff_pool[i].active)
			ibmveth_free_buffer_pool(adapter,
						 &adapter->rx_buff_pool[i]);

	if (adapter->bounce_buffer != NULL) {
		if (!dma_mapping_error(dev, adapter->bounce_buffer_dma)) {
			dma_unmap_single(&adapter->vdev->dev,
					 adapter->bounce_buffer_dma,
					 adapter->netdev->mtu + IBMVETH_BUFF_OH,
					 DMA_BIDIRECTIONAL);
			adapter->bounce_buffer_dma = DMA_ERROR_CODE;
		}
		kfree(adapter->bounce_buffer);
		adapter->bounce_buffer = NULL;
	}
}

static int ibmveth_register_logical_lan(struct ibmveth_adapter *adapter,
        union ibmveth_buf_desc rxq_desc, u64 mac_address)
{
	int rc, try_again = 1;

	/*
	 * After a kexec the adapter will still be open, so our attempt to
	 * open it will fail. So if we get a failure we free the adapter and
	 * try again, but only once.
	 */
retry:
	rc = h_register_logical_lan(adapter->vdev->unit_address,
				    adapter->buffer_list_dma, rxq_desc.desc,
				    adapter->filter_list_dma, mac_address);

	if (rc != H_SUCCESS && try_again) {
		do {
			rc = h_free_logical_lan(adapter->vdev->unit_address);
		} while (H_IS_LONG_BUSY(rc) || (rc == H_BUSY));

		try_again = 0;
		goto retry;
	}

	return rc;
}
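
/*
 * The hypervisor calls take the MAC address as a u64 with the six bytes
 * packed big-endian into the low-order bits, which is what this helper
 * produces.
 */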
static u64 ibmveth_encode_mac_addr(u8 *mac)
{
	int i;
	u64 encoded = 0;

	for (i = 0; i < ETH_ALEN; i++)
		encoded = (encoded << 8) | mac[i];

	return encoded;
}

static int ibmveth_open(struct net_device *netdev)
{
	struct ibmveth_adapter *adapter = netdev_priv(netdev);
	u64 mac_address;
	int rxq_entries = 1;
	unsigned long lpar_rc;
	int rc;
	union ibmveth_buf_desc rxq_desc;
	int i;
	struct device *dev;

	netdev_dbg(netdev, "open starting\n");

	napi_enable(&adapter->napi);

	for(i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++)
		rxq_entries += adapter->rx_buff_pool[i].size;

	adapter->buffer_list_addr = (void*) get_zeroed_page(GFP_KERNEL);
	adapter->filter_list_addr = (void*) get_zeroed_page(GFP_KERNEL);

	if (!adapter->buffer_list_addr || !adapter->filter_list_addr) {
		netdev_err(netdev, "unable to allocate filter or buffer list "
			   "pages\n");
		rc = -ENOMEM;
		goto err_out;
	}

	dev = &adapter->vdev->dev;

	adapter->rx_queue.queue_len = sizeof(struct ibmveth_rx_q_entry) *
						rxq_entries;
	adapter->rx_queue.queue_addr =
		dma_alloc_coherent(dev, adapter->rx_queue.queue_len,
				   &adapter->rx_queue.queue_dma, GFP_KERNEL);
	if (!adapter->rx_queue.queue_addr) {
		rc = -ENOMEM;
		goto err_out;
	}

	adapter->buffer_list_dma = dma_map_single(dev,
			adapter->buffer_list_addr, 4096, DMA_BIDIRECTIONAL);
	adapter->filter_list_dma = dma_map_single(dev,
			adapter->filter_list_addr, 4096, DMA_BIDIRECTIONAL);

	if ((dma_mapping_error(dev, adapter->buffer_list_dma)) ||
	    (dma_mapping_error(dev, adapter->filter_list_dma))) {
		netdev_err(netdev, "unable to map filter or buffer list "
			   "pages\n");
		rc = -ENOMEM;
		goto err_out;
	}

	adapter->rx_queue.index = 0;
	adapter->rx_queue.num_slots = rxq_entries;
	adapter->rx_queue.toggle = 1;

	mac_address = ibmveth_encode_mac_addr(netdev->dev_addr);

	rxq_desc.fields.flags_len = IBMVETH_BUF_VALID |
					adapter->rx_queue.queue_len;
	rxq_desc.fields.address = adapter->rx_queue.queue_dma;

	netdev_dbg(netdev, "buffer list @ 0x%p\n", adapter->buffer_list_addr);
	netdev_dbg(netdev, "filter list @ 0x%p\n", adapter->filter_list_addr);
	netdev_dbg(netdev, "receive q   @ 0x%p\n", adapter->rx_queue.queue_addr);

	h_vio_signal(adapter->vdev->unit_address, VIO_IRQ_DISABLE);

	lpar_rc = ibmveth_register_logical_lan(adapter, rxq_desc, mac_address);

	if (lpar_rc != H_SUCCESS) {
		netdev_err(netdev, "h_register_logical_lan failed with %ld\n",
			   lpar_rc);
		netdev_err(netdev, "buffer TCE:0x%llx filter TCE:0x%llx rxq "
			   "desc:0x%llx MAC:0x%llx\n",
			   adapter->buffer_list_dma,
			   adapter->filter_list_dma,
			   adapter->rx_queue.queue_dma,
			   mac_address);
		rc = -ENONET;
		goto err_out;
	}

	for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
		if (!adapter->rx_buff_pool[i].active)
			continue;
		if (ibmveth_alloc_buffer_pool(&adapter->rx_buff_pool[i])) {
			netdev_err(netdev, "unable to alloc pool\n");
			adapter->rx_buff_pool[i].active = 0;
			rc = -ENOMEM;
			goto err_out;
		}
	}

	netdev_dbg(netdev, "registering irq 0x%x\n", netdev->irq);
	rc = request_irq(netdev->irq, ibmveth_interrupt, 0, netdev->name,
			 netdev);
	if (rc != 0) {
		netdev_err(netdev, "unable to request irq 0x%x, rc %d\n",
			   netdev->irq, rc);
		do {
			lpar_rc = h_free_logical_lan(adapter->vdev->unit_address);
		} while (H_IS_LONG_BUSY(lpar_rc) || (lpar_rc == H_BUSY));

		goto err_out;
	}

	adapter->bounce_buffer =
	    kmalloc(netdev->mtu + IBMVETH_BUFF_OH, GFP_KERNEL);
	if (!adapter->bounce_buffer) {
		rc = -ENOMEM;
		goto err_out_free_irq;
	}
	adapter->bounce_buffer_dma =
	    dma_map_single(&adapter->vdev->dev, adapter->bounce_buffer,
			   netdev->mtu + IBMVETH_BUFF_OH, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, adapter->bounce_buffer_dma)) {
		netdev_err(netdev, "unable to map bounce buffer\n");
		rc = -ENOMEM;
		goto err_out_free_irq;
	}

	netdev_dbg(netdev, "initial replenish cycle\n");
	ibmveth_interrupt(netdev->irq, netdev);

	netif_start_queue(netdev);

	netdev_dbg(netdev, "open complete\n");

	return 0;

err_out_free_irq:
	free_irq(netdev->irq, netdev);
err_out:
	ibmveth_cleanup(adapter);
	napi_disable(&adapter->napi);
	return rc;
}

static int ibmveth_close(struct net_device *netdev)
{
	struct ibmveth_adapter *adapter = netdev_priv(netdev);
	long lpar_rc;

	netdev_dbg(netdev, "close starting\n");

	napi_disable(&adapter->napi);

	if (!adapter->pool_config)
		netif_stop_queue(netdev);

	h_vio_signal(adapter->vdev->unit_address, VIO_IRQ_DISABLE);

	do {
		lpar_rc = h_free_logical_lan(adapter->vdev->unit_address);
	} while (H_IS_LONG_BUSY(lpar_rc) || (lpar_rc == H_BUSY));

	if (lpar_rc != H_SUCCESS) {
		netdev_err(netdev, "h_free_logical_lan failed with %lx, "
			   "continuing with close\n", lpar_rc);
	}

	free_irq(netdev->irq, netdev);
	ibmveth_update_rx_no_buffer(adapter);
	ibmveth_cleanup(adapter);

	netdev_dbg(netdev, "close complete\n");

	return 0;
}

static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	cmd->supported = (SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg |
				SUPPORTED_FIBRE);
	cmd->advertising = (ADVERTISED_1000baseT_Full | ADVERTISED_Autoneg |
				ADVERTISED_FIBRE);
	ethtool_cmd_speed_set(cmd, SPEED_1000);
	cmd->duplex = DUPLEX_FULL;
	cmd->port = PORT_FIBRE;
	cmd->phy_address = 0;
	cmd->transceiver = XCVR_INTERNAL;
	cmd->autoneg = AUTONEG_ENABLE;
	cmd->maxtxpkt = 0;
	cmd->maxrxpkt = 1;

	return 0;
}

static void netdev_get_drvinfo(struct net_device *dev,
			       struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, ibmveth_driver_name, sizeof(info->driver));
	strlcpy(info->version, ibmveth_driver_version, sizeof(info->version));
}

static netdev_features_t ibmveth_fix_features(struct net_device *dev,
	netdev_features_t features)
{
	/*
	 * Since the ibmveth firmware interface does not have the
	 * concept of separate tx/rx checksum offload enable, if rx
	 * checksum is disabled we also have to disable tx checksum
	 * offload. Once we disable rx checksum offload, we are no
	 * longer allowed to send tx buffers that are not properly
	 * checksummed.
	 */
	if (!(features & NETIF_F_RXCSUM))
		features &= ~NETIF_F_ALL_CSUM;

	return features;
}

static int ibmveth_set_csum_offload(struct net_device *dev, u32 data)
{
	struct ibmveth_adapter *adapter = netdev_priv(dev);
	unsigned long set_attr, clr_attr, ret_attr;
	unsigned long set_attr6, clr_attr6;
	long ret, ret4, ret6;
	int rc1 = 0, rc2 = 0;
	int restart = 0;

	if (netif_running(dev)) {
		restart = 1;
		adapter->pool_config = 1;
		ibmveth_close(dev);
		adapter->pool_config = 0;
	}

	set_attr = 0;
	clr_attr = 0;
	set_attr6 = 0;
	clr_attr6 = 0;

	if (data) {
		set_attr = IBMVETH_ILLAN_IPV4_TCP_CSUM;
		set_attr6 = IBMVETH_ILLAN_IPV6_TCP_CSUM;
	} else {
		clr_attr = IBMVETH_ILLAN_IPV4_TCP_CSUM;
		clr_attr6 = IBMVETH_ILLAN_IPV6_TCP_CSUM;
	}

	ret = h_illan_attributes(adapter->vdev->unit_address, 0, 0, &ret_attr);

	if (ret == H_SUCCESS && !(ret_attr & IBMVETH_ILLAN_ACTIVE_TRUNK) &&
	    !(ret_attr & IBMVETH_ILLAN_TRUNK_PRI_MASK) &&
	    (ret_attr & IBMVETH_ILLAN_PADDED_PKT_CSUM)) {
		ret4 = h_illan_attributes(adapter->vdev->unit_address, clr_attr,
					  set_attr, &ret_attr);

		if (ret4 != H_SUCCESS) {
			netdev_err(dev, "unable to change IPv4 checksum "
					"offload settings. %d rc=%ld\n",
					data, ret4);
			h_illan_attributes(adapter->vdev->unit_address,
					   set_attr, clr_attr, &ret_attr);

			if (data == 1)
				dev->features &= ~NETIF_F_IP_CSUM;
		} else {
			adapter->fw_ipv4_csum_support = data;
		}

		ret6 = h_illan_attributes(adapter->vdev->unit_address,
					  clr_attr6, set_attr6, &ret_attr);

		if (ret6 != H_SUCCESS) {
			netdev_err(dev, "unable to change IPv6 checksum "
					"offload settings. %d rc=%ld\n",
					data, ret6);
			h_illan_attributes(adapter->vdev->unit_address,
					   set_attr6, clr_attr6, &ret_attr);

			if (data == 1)
				dev->features &= ~NETIF_F_IPV6_CSUM;
		} else
			adapter->fw_ipv6_csum_support = data;

		if (ret4 == H_SUCCESS || ret6 == H_SUCCESS)
			adapter->rx_csum = data;
		else
			rc1 = -EIO;
	} else {
		rc1 = -EIO;
		netdev_err(dev, "unable to change checksum offload settings."
				" %d rc=%ld ret_attr=%lx\n", data, ret,
				ret_attr);
	}

	if (restart)
		rc2 = ibmveth_open(dev);

	return rc1 ? rc1 : rc2;
}

static int ibmveth_set_features(struct net_device *dev,
	netdev_features_t features)
{
	struct ibmveth_adapter *adapter = netdev_priv(dev);
	int rx_csum = !!(features & NETIF_F_RXCSUM);
	int rc;
	netdev_features_t changed = features ^ dev->features;

	if (features & NETIF_F_TSO & changed)
		netdev_info(dev, "TSO feature requires all partitions to have updated driver");

	if (rx_csum == adapter->rx_csum)
		return 0;

	rc = ibmveth_set_csum_offload(dev, rx_csum);
	if (rc && !adapter->rx_csum)
		dev->features =
			features & ~(NETIF_F_ALL_CSUM | NETIF_F_RXCSUM);
	else
		dev->features = features;

	return rc;
}

static void ibmveth_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	int i;

	if (stringset != ETH_SS_STATS)
		return;

	for (i = 0; i < ARRAY_SIZE(ibmveth_stats); i++, data += ETH_GSTRING_LEN)
		memcpy(data, ibmveth_stats[i].name, ETH_GSTRING_LEN);
}

static int ibmveth_get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return ARRAY_SIZE(ibmveth_stats);
	default:
		return -EOPNOTSUPP;
	}
}

static void ibmveth_get_ethtool_stats(struct net_device *dev,
				      struct ethtool_stats *stats, u64 *data)
{
	int i;
	struct ibmveth_adapter *adapter = netdev_priv(dev);

	for (i = 0; i < ARRAY_SIZE(ibmveth_stats); i++)
		data[i] = IBMVETH_GET_STAT(adapter, ibmveth_stats[i].offset);
}

static const struct ethtool_ops netdev_ethtool_ops = {
	.get_drvinfo		= netdev_get_drvinfo,
	.get_settings		= netdev_get_settings,
	.get_link		= ethtool_op_get_link,
	.get_strings		= ibmveth_get_strings,
	.get_sset_count		= ibmveth_get_sset_count,
	.get_ethtool_stats	= ibmveth_get_ethtool_stats,
};

static int ibmveth_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	return -EOPNOTSUPP;
}

/* byte offset of v within its (hardcoded 4K) page */
#define page_offset(v) ((unsigned long)(v) & ((1 << 12) - 1))
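
/*
 * Hand a fully built descriptor set to the hypervisor, retrying while it
 * reports H_BUSY; anything other than H_SUCCESS or H_DROPPED is treated
 * as a transmit failure.
 */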
static int ibmveth_send(struct ibmveth_adapter *adapter,
			union ibmveth_buf_desc *descs)
{
	unsigned long correlator;
	unsigned int retry_count;
	unsigned long ret;

	/*
	 * The retry count sets a maximum for the number of broadcast and
	 * multicast destinations within the system.
	 */
	retry_count = 1024;
	correlator = 0;
	do {
		ret = h_send_logical_lan(adapter->vdev->unit_address,
					 descs[0].desc, descs[1].desc,
					 descs[2].desc, descs[3].desc,
					 descs[4].desc, descs[5].desc,
					 correlator, &correlator);
	} while ((ret == H_BUSY) && (retry_count--));

	if (ret != H_SUCCESS && ret != H_DROPPED) {
		netdev_err(adapter->netdev, "tx: h_send_logical_lan failed "
			   "with rc=%ld\n", ret);
		return 1;
	}

	return 0;
}

static netdev_tx_t ibmveth_start_xmit(struct sk_buff *skb,
				      struct net_device *netdev)
{
	struct ibmveth_adapter *adapter = netdev_priv(netdev);
	unsigned int desc_flags;
	union ibmveth_buf_desc descs[6];
	int last, i;
	int force_bounce = 0;
	dma_addr_t dma_addr;

	/*
	 * veth handles a maximum of 6 segments including the header, so
	 * we have to linearize the skb if there are more than this.
	 */
	if (skb_shinfo(skb)->nr_frags > 5 && __skb_linearize(skb)) {
		netdev->stats.tx_dropped++;
		goto out;
	}

	/* veth can't checksum offload UDP */
	if (skb->ip_summed == CHECKSUM_PARTIAL &&
	    ((skb->protocol == htons(ETH_P_IP) &&
	      ip_hdr(skb)->protocol != IPPROTO_TCP) ||
	     (skb->protocol == htons(ETH_P_IPV6) &&
	      ipv6_hdr(skb)->nexthdr != IPPROTO_TCP)) &&
	    skb_checksum_help(skb)) {

		netdev_err(netdev, "tx: failed to checksum packet\n");
		netdev->stats.tx_dropped++;
		goto out;
	}

	desc_flags = IBMVETH_BUF_VALID;

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		unsigned char *buf = skb_transport_header(skb) +
					skb->csum_offset;

		desc_flags |= (IBMVETH_BUF_NO_CSUM | IBMVETH_BUF_CSUM_GOOD);

		/* Need to zero out the checksum */
		buf[0] = 0;
		buf[1] = 0;
	}

retry_bounce:
	memset(descs, 0, sizeof(descs));

	/*
	 * If a linear packet is below the rx threshold then
	 * copy it into the static bounce buffer. This avoids the
	 * cost of a TCE insert and remove.
	 */
	if (force_bounce || (!skb_is_nonlinear(skb) &&
				(skb->len < tx_copybreak))) {
		skb_copy_from_linear_data(skb, adapter->bounce_buffer,
					  skb->len);

		descs[0].fields.flags_len = desc_flags | skb->len;
		descs[0].fields.address = adapter->bounce_buffer_dma;

		if (ibmveth_send(adapter, descs)) {
			adapter->tx_send_failed++;
			netdev->stats.tx_dropped++;
		} else {
			netdev->stats.tx_packets++;
			netdev->stats.tx_bytes += skb->len;
		}

		goto out;
	}

	/* Map the header */
	dma_addr = dma_map_single(&adapter->vdev->dev, skb->data,
				  skb_headlen(skb), DMA_TO_DEVICE);
	if (dma_mapping_error(&adapter->vdev->dev, dma_addr))
		goto map_failed;

	descs[0].fields.flags_len = desc_flags | skb_headlen(skb);
	descs[0].fields.address = dma_addr;

	/* Map the frags */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		dma_addr = skb_frag_dma_map(&adapter->vdev->dev, frag, 0,
					    skb_frag_size(frag), DMA_TO_DEVICE);

		if (dma_mapping_error(&adapter->vdev->dev, dma_addr))
			goto map_failed_frags;

		descs[i+1].fields.flags_len = desc_flags | skb_frag_size(frag);
		descs[i+1].fields.address = dma_addr;
	}

	if (skb_is_gso(skb) && !skb_is_gso_v6(skb)) {
		/* Put -1 in the IP checksum to tell phyp it
		 * is a largesend packet and put the mss in the TCP checksum.
		 */
		ip_hdr(skb)->check = 0xffff;
		tcp_hdr(skb)->check = cpu_to_be16(skb_shinfo(skb)->gso_size);
		adapter->tx_large_packets++;
	}

	if (ibmveth_send(adapter, descs)) {
		adapter->tx_send_failed++;
		netdev->stats.tx_dropped++;
	} else {
		netdev->stats.tx_packets++;
		netdev->stats.tx_bytes += skb->len;
	}

	dma_unmap_single(&adapter->vdev->dev,
			 descs[0].fields.address,
			 descs[0].fields.flags_len & IBMVETH_BUF_LEN_MASK,
			 DMA_TO_DEVICE);

	for (i = 1; i < skb_shinfo(skb)->nr_frags + 1; i++)
		dma_unmap_page(&adapter->vdev->dev, descs[i].fields.address,
			       descs[i].fields.flags_len & IBMVETH_BUF_LEN_MASK,
			       DMA_TO_DEVICE);

out:
	dev_consume_skb_any(skb);
	return NETDEV_TX_OK;

map_failed_frags:
	last = i+1;
	for (i = 0; i < last; i++)
		dma_unmap_page(&adapter->vdev->dev, descs[i].fields.address,
			       descs[i].fields.flags_len & IBMVETH_BUF_LEN_MASK,
			       DMA_TO_DEVICE);

map_failed:
	if (!firmware_has_feature(FW_FEATURE_CMO))
		netdev_err(netdev, "tx: unable to map xmit buffer\n");
	adapter->tx_map_failed++;
	skb_linearize(skb);
	force_bounce = 1;
	goto retry_bounce;
}
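
/*
 * NAPI poll: drain up to budget frames from the receive queue, replenish
 * the buffer pools, then re-enable the virtual interrupt and check once
 * more so a frame that arrived in the meantime is not left behind.
 */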
static int ibmveth_poll(struct napi_struct *napi, int budget)
{
	struct ibmveth_adapter *adapter =
			container_of(napi, struct ibmveth_adapter, napi);
	struct net_device *netdev = adapter->netdev;
	int frames_processed = 0;
	unsigned long lpar_rc;

restart_poll:
	while (frames_processed < budget) {
		if (!ibmveth_rxq_pending_buffer(adapter))
			break;

		smp_rmb();
		if (!ibmveth_rxq_buffer_valid(adapter)) {
			wmb(); /* suggested by larson1 */
			adapter->rx_invalid_buffer++;
			netdev_dbg(netdev, "recycling invalid buffer\n");
			ibmveth_rxq_recycle_buffer(adapter);
		} else {
			struct sk_buff *skb, *new_skb;
			int length = ibmveth_rxq_frame_length(adapter);
			int offset = ibmveth_rxq_frame_offset(adapter);
			int csum_good = ibmveth_rxq_csum_good(adapter);

			skb = ibmveth_rxq_get_buffer(adapter);

			new_skb = NULL;
			if (length < rx_copybreak)
				new_skb = netdev_alloc_skb(netdev, length);

			if (new_skb) {
				skb_copy_to_linear_data(new_skb,
							skb->data + offset,
							length);
				if (rx_flush)
					ibmveth_flush_buffer(skb->data,
						length + offset);
				if (!ibmveth_rxq_recycle_buffer(adapter))
					kfree_skb(skb);
				skb = new_skb;
			} else {
				ibmveth_rxq_harvest_buffer(adapter);
				skb_reserve(skb, offset);
			}

			skb_put(skb, length);
			skb->protocol = eth_type_trans(skb, netdev);

			if (csum_good)
				skb->ip_summed = CHECKSUM_UNNECESSARY;

			napi_gro_receive(napi, skb);	/* send it up */

			netdev->stats.rx_packets++;
			netdev->stats.rx_bytes += length;
			frames_processed++;
		}
	}

	ibmveth_replenish_task(adapter);

	if (frames_processed < budget) {
		napi_complete(napi);

		/* We think we are done - reenable interrupts,
		 * then check once more to make sure we are done.
		 */
		lpar_rc = h_vio_signal(adapter->vdev->unit_address,
				       VIO_IRQ_ENABLE);

		BUG_ON(lpar_rc != H_SUCCESS);

		if (ibmveth_rxq_pending_buffer(adapter) &&
		    napi_reschedule(napi)) {
			lpar_rc = h_vio_signal(adapter->vdev->unit_address,
					       VIO_IRQ_DISABLE);
			goto restart_poll;
		}
	}

	return frames_processed;
}

static irqreturn_t ibmveth_interrupt(int irq, void *dev_instance)
{
	struct net_device *netdev = dev_instance;
	struct ibmveth_adapter *adapter = netdev_priv(netdev);
	unsigned long lpar_rc;

	if (napi_schedule_prep(&adapter->napi)) {
		lpar_rc = h_vio_signal(adapter->vdev->unit_address,
				       VIO_IRQ_DISABLE);
		BUG_ON(lpar_rc != H_SUCCESS);
		__napi_schedule(&adapter->napi);
	}

	return IRQ_HANDLED;
}

static void ibmveth_set_multicast_list(struct net_device *netdev)
{
	struct ibmveth_adapter *adapter = netdev_priv(netdev);
	unsigned long lpar_rc;

	if ((netdev->flags & IFF_PROMISC) ||
	    (netdev_mc_count(netdev) > adapter->mcastFilterSize)) {
		lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
					   IbmVethMcastEnableRecv |
					   IbmVethMcastDisableFiltering,
					   0);
		if (lpar_rc != H_SUCCESS) {
			netdev_err(netdev, "h_multicast_ctrl rc=%ld when "
				   "entering promisc mode\n", lpar_rc);
		}
	} else {
		struct netdev_hw_addr *ha;
		/* clear the filter table & disable filtering */
		lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
					   IbmVethMcastEnableRecv |
					   IbmVethMcastDisableFiltering |
					   IbmVethMcastClearFilterTable,
					   0);
		if (lpar_rc != H_SUCCESS) {
			netdev_err(netdev, "h_multicast_ctrl rc=%ld when "
				   "attempting to clear filter table\n",
				   lpar_rc);
		}
		/* add the addresses to the filter table */
		netdev_for_each_mc_addr(ha, netdev) {
			/* add the multicast address to the filter table */
			u64 mcast_addr;
			mcast_addr = ibmveth_encode_mac_addr(ha->addr);
			lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
						   IbmVethMcastAddFilter,
						   mcast_addr);
			if (lpar_rc != H_SUCCESS) {
				netdev_err(netdev, "h_multicast_ctrl rc=%ld "
					   "when adding an entry to the filter "
					   "table\n", lpar_rc);
			}
		}

		/* re-enable filtering */
		lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
					   IbmVethMcastEnableFiltering,
					   0);
		if (lpar_rc != H_SUCCESS) {
			netdev_err(netdev, "h_multicast_ctrl rc=%ld when "
				   "enabling filtering\n", lpar_rc);
		}
	}
}

static int ibmveth_change_mtu(struct net_device *dev, int new_mtu)
{
	struct ibmveth_adapter *adapter = netdev_priv(dev);
	struct vio_dev *viodev = adapter->vdev;
	int new_mtu_oh = new_mtu + IBMVETH_BUFF_OH;
	int i, rc;
	int need_restart = 0;

	if (new_mtu < IBMVETH_MIN_MTU)
		return -EINVAL;

	for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++)
		if (new_mtu_oh <= adapter->rx_buff_pool[i].buff_size)
			break;

	if (i == IBMVETH_NUM_BUFF_POOLS)
		return -EINVAL;

	/* Deactivate all the buffer pools so that the next loop can activate
	   only the buffer pools necessary to hold the new MTU */
	if (netif_running(adapter->netdev)) {
		need_restart = 1;
		adapter->pool_config = 1;
		ibmveth_close(adapter->netdev);
		adapter->pool_config = 0;
	}

	/* Look for an active buffer pool that can hold the new MTU */
	for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
		adapter->rx_buff_pool[i].active = 1;

		if (new_mtu_oh <= adapter->rx_buff_pool[i].buff_size) {
			dev->mtu = new_mtu;
			vio_cmo_set_dev_desired(viodev,
						ibmveth_get_desired_dma
						(viodev));
			if (need_restart) {
				return ibmveth_open(adapter->netdev);
			}
			return 0;
		}
	}

	if (need_restart && (rc = ibmveth_open(adapter->netdev)))
		return rc;

	return -EINVAL;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void ibmveth_poll_controller(struct net_device *dev)
{
	ibmveth_replenish_task(netdev_priv(dev));
	ibmveth_interrupt(dev->irq, dev);
}
#endif

/**
 * ibmveth_get_desired_dma - Calculate IO memory desired by the driver
 *
 * @vdev: struct vio_dev for the device whose desired IO mem is to be returned
 *
 * Return value:
 *	Number of bytes of IO data the driver will need to perform well.
 */
static unsigned long ibmveth_get_desired_dma(struct vio_dev *vdev)
{
	struct net_device *netdev = dev_get_drvdata(&vdev->dev);
	struct ibmveth_adapter *adapter;
	struct iommu_table *tbl;
	unsigned long ret;
	int i;
	int rxqentries = 1;

	tbl = get_iommu_table_base(&vdev->dev);

	/* netdev inits at probe time along with the structures we need below*/
	if (netdev == NULL)
		return IOMMU_PAGE_ALIGN(IBMVETH_IO_ENTITLEMENT_DEFAULT, tbl);

	adapter = netdev_priv(netdev);

	ret = IBMVETH_BUFF_LIST_SIZE + IBMVETH_FILT_LIST_SIZE;
	ret += IOMMU_PAGE_ALIGN(netdev->mtu, tbl);

	for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
		/* add the size of the active receive buffers */
		if (adapter->rx_buff_pool[i].active)
			ret +=
			    adapter->rx_buff_pool[i].size *
			    IOMMU_PAGE_ALIGN(adapter->rx_buff_pool[i].
					     buff_size, tbl);
		rxqentries += adapter->rx_buff_pool[i].size;
	}
	/* add the size of the receive queue entries */
	ret += IOMMU_PAGE_ALIGN(
		rxqentries * sizeof(struct ibmveth_rx_q_entry), tbl);

	return ret;
}

static int ibmveth_set_mac_addr(struct net_device *dev, void *p)
{
	struct ibmveth_adapter *adapter = netdev_priv(dev);
	struct sockaddr *addr = p;
	u64 mac_address;
	int rc;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	mac_address = ibmveth_encode_mac_addr(addr->sa_data);
	rc = h_change_logical_lan_mac(adapter->vdev->unit_address, mac_address);
	if (rc) {
		netdev_err(adapter->netdev, "h_change_logical_lan_mac failed with rc=%d\n", rc);
		return rc;
	}

	ether_addr_copy(dev->dev_addr, addr->sa_data);

	return 0;
}

static const struct net_device_ops ibmveth_netdev_ops = {
	.ndo_open		= ibmveth_open,
	.ndo_stop		= ibmveth_close,
	.ndo_start_xmit		= ibmveth_start_xmit,
	.ndo_set_rx_mode	= ibmveth_set_multicast_list,
	.ndo_do_ioctl		= ibmveth_ioctl,
	.ndo_change_mtu		= ibmveth_change_mtu,
	.ndo_fix_features	= ibmveth_fix_features,
	.ndo_set_features	= ibmveth_set_features,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= ibmveth_set_mac_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= ibmveth_poll_controller,
#endif
};

static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id)
{
	int rc, i, mac_len;
	struct net_device *netdev;
	struct ibmveth_adapter *adapter;
	unsigned char *mac_addr_p;
	unsigned int *mcastFilterSize_p;

	dev_dbg(&dev->dev, "entering ibmveth_probe for UA 0x%x\n",
		dev->unit_address);

	mac_addr_p = (unsigned char *)vio_get_attribute(dev, VETH_MAC_ADDR,
							&mac_len);
	if (!mac_addr_p) {
		dev_err(&dev->dev, "Can't find VETH_MAC_ADDR attribute\n");
		return -EINVAL;
	}
	/* Workaround for old/broken pHyp */
	if (mac_len == 8)
		mac_addr_p += 2;
	else if (mac_len != 6) {
		dev_err(&dev->dev, "VETH_MAC_ADDR attribute wrong len %d\n",
			mac_len);
		return -EINVAL;
	}

	mcastFilterSize_p = (unsigned int *)vio_get_attribute(dev,
						VETH_MCAST_FILTER_SIZE, NULL);
	if (!mcastFilterSize_p) {
		dev_err(&dev->dev, "Can't find VETH_MCAST_FILTER_SIZE "
			"attribute\n");
		return -EINVAL;
	}

	netdev = alloc_etherdev(sizeof(struct ibmveth_adapter));

	if (!netdev)
		return -ENOMEM;

	adapter = netdev_priv(netdev);
	dev_set_drvdata(&dev->dev, netdev);

	adapter->vdev = dev;
	adapter->netdev = netdev;
	adapter->mcastFilterSize = *mcastFilterSize_p;
	adapter->pool_config = 0;

	netif_napi_add(netdev, &adapter->napi, ibmveth_poll, 16);

	netdev->irq = dev->irq;
	netdev->netdev_ops = &ibmveth_netdev_ops;
	netdev->ethtool_ops = &netdev_ethtool_ops;
	SET_NETDEV_DEV(netdev, &dev->dev);
	netdev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
	netdev->features |= netdev->hw_features;

	/* TSO is disabled by default */
	netdev->hw_features |= NETIF_F_TSO;

	memcpy(netdev->dev_addr, mac_addr_p, ETH_ALEN);

	if (firmware_has_feature(FW_FEATURE_CMO))
		memcpy(pool_count, pool_count_cmo, sizeof(pool_count));

	for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
		struct kobject *kobj = &adapter->rx_buff_pool[i].kobj;
		int error;

		ibmveth_init_buffer_pool(&adapter->rx_buff_pool[i], i,
					 pool_count[i], pool_size[i],
					 pool_active[i]);
		error = kobject_init_and_add(kobj, &ktype_veth_pool,
					     &dev->dev.kobj, "pool%d", i);
		if (!error)
			kobject_uevent(kobj, KOBJ_ADD);
	}

	netdev_dbg(netdev, "adapter @ 0x%p\n", adapter);

	adapter->buffer_list_dma = DMA_ERROR_CODE;
	adapter->filter_list_dma = DMA_ERROR_CODE;
	adapter->rx_queue.queue_dma = DMA_ERROR_CODE;

	netdev_dbg(netdev, "registering netdev...\n");

	ibmveth_set_features(netdev, netdev->features);

	rc = register_netdev(netdev);

	if (rc) {
		netdev_dbg(netdev, "failed to register netdev rc=%d\n", rc);
		free_netdev(netdev);
		return rc;
	}

	netdev_dbg(netdev, "registered\n");

	return 0;
}

static int ibmveth_remove(struct vio_dev *dev)
{
	struct net_device *netdev = dev_get_drvdata(&dev->dev);
	struct ibmveth_adapter *adapter = netdev_priv(netdev);
	int i;

	for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++)
		kobject_put(&adapter->rx_buff_pool[i].kobj);

	unregister_netdev(netdev);

	free_netdev(netdev);
	dev_set_drvdata(&dev->dev, NULL);

	return 0;
}

static struct attribute veth_active_attr;
static struct attribute veth_num_attr;
static struct attribute veth_size_attr;

static ssize_t veth_pool_show(struct kobject *kobj,
			      struct attribute *attr, char *buf)
{
	struct ibmveth_buff_pool *pool = container_of(kobj,
						      struct ibmveth_buff_pool,
						      kobj);

	if (attr == &veth_active_attr)
		return sprintf(buf, "%d\n", pool->active);
	else if (attr == &veth_num_attr)
		return sprintf(buf, "%d\n", pool->size);
	else if (attr == &veth_size_attr)
		return sprintf(buf, "%d\n", pool->buff_size);
	return 0;
}

static ssize_t veth_pool_store(struct kobject *kobj, struct attribute *attr,
			       const char *buf, size_t count)
{
	struct ibmveth_buff_pool *pool = container_of(kobj,
						      struct ibmveth_buff_pool,
						      kobj);
	struct net_device *netdev = dev_get_drvdata(
	    container_of(kobj->parent, struct device, kobj));
	struct ibmveth_adapter *adapter = netdev_priv(netdev);
	long value = simple_strtol(buf, NULL, 10);
	long rc;

	if (attr == &veth_active_attr) {
		if (value && !pool->active) {
			if (netif_running(netdev)) {
				if (ibmveth_alloc_buffer_pool(pool)) {
					netdev_err(netdev,
						   "unable to alloc pool\n");
					return -ENOMEM;
				}
				pool->active = 1;
				adapter->pool_config = 1;
				ibmveth_close(netdev);
				adapter->pool_config = 0;
				if ((rc = ibmveth_open(netdev)))
					return rc;
			} else {
				pool->active = 1;
			}
		} else if (!value && pool->active) {
			int mtu = netdev->mtu + IBMVETH_BUFF_OH;
			int i;
			/* Make sure there is a buffer pool with buffers that
			   can hold a packet of the size of the MTU */
			for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
				if (pool == &adapter->rx_buff_pool[i])
					continue;
				if (!adapter->rx_buff_pool[i].active)
					continue;
				if (mtu <= adapter->rx_buff_pool[i].buff_size)
					break;
			}

			if (i == IBMVETH_NUM_BUFF_POOLS) {
				netdev_err(netdev, "no active pool >= MTU\n");
				return -EPERM;
			}

			if (netif_running(netdev)) {
				adapter->pool_config = 1;
				ibmveth_close(netdev);
				pool->active = 0;
				adapter->pool_config = 0;
				if ((rc = ibmveth_open(netdev)))
					return rc;
			}
			pool->active = 0;
		}
	} else if (attr == &veth_num_attr) {
		if (value <= 0 || value > IBMVETH_MAX_POOL_COUNT) {
			return -EINVAL;
		} else {
			if (netif_running(netdev)) {
				adapter->pool_config = 1;
				ibmveth_close(netdev);
				adapter->pool_config = 0;
				pool->size = value;
				if ((rc = ibmveth_open(netdev)))
					return rc;
			} else {
				pool->size = value;
			}
		}
	} else if (attr == &veth_size_attr) {
		if (value <= IBMVETH_BUFF_OH || value > IBMVETH_MAX_BUF_SIZE) {
			return -EINVAL;
		} else {
			if (netif_running(netdev)) {
				adapter->pool_config = 1;
				ibmveth_close(netdev);
				adapter->pool_config = 0;
				pool->buff_size = value;
				if ((rc = ibmveth_open(netdev)))
					return rc;
			} else {
				pool->buff_size = value;
			}
		}
	}

	/* kick the interrupt handler to allocate/deallocate pools */
	ibmveth_interrupt(netdev->irq, netdev);
	return count;
}

#define ATTR(_name, _mode)				\
	struct attribute veth_##_name##_attr = {	\
	.name = __stringify(_name), .mode = _mode,	\
	};

static ATTR(active, 0644);
static ATTR(num, 0644);
static ATTR(size, 0644);

static struct attribute *veth_pool_attrs[] = {
	&veth_active_attr,
	&veth_num_attr,
	&veth_size_attr,
	NULL,
};

static const struct sysfs_ops veth_pool_ops = {
	.show   = veth_pool_show,
	.store  = veth_pool_store,
};

static struct kobj_type ktype_veth_pool = {
	.release	= NULL,
	.sysfs_ops	= &veth_pool_ops,
	.default_attrs	= veth_pool_attrs,
};

static int ibmveth_resume(struct device *dev)
{
	struct net_device *netdev = dev_get_drvdata(dev);
	ibmveth_interrupt(netdev->irq, netdev);
	return 0;
}

static struct vio_device_id ibmveth_device_table[] = {
	{ "network", "IBM,l-lan"},
	{ "", "" }
};
MODULE_DEVICE_TABLE(vio, ibmveth_device_table);

static struct dev_pm_ops ibmveth_pm_ops = {
	.resume = ibmveth_resume
};

static struct vio_driver ibmveth_driver = {
	.id_table	= ibmveth_device_table,
	.probe		= ibmveth_probe,
	.remove		= ibmveth_remove,
	.get_desired_dma = ibmveth_get_desired_dma,
	.name		= ibmveth_driver_name,
	.pm		= &ibmveth_pm_ops,
};

static int __init ibmveth_module_init(void)
{
	printk(KERN_DEBUG "%s: %s %s\n", ibmveth_driver_name,
	       ibmveth_driver_string, ibmveth_driver_version);

	return vio_register_driver(&ibmveth_driver);
}

static void __exit ibmveth_module_exit(void)
{
	vio_unregister_driver(&ibmveth_driver);
}

module_init(ibmveth_module_init);
module_exit(ibmveth_module_exit);