INTEL_E1000_ETHERNET_DEVICE(0x108A),
INTEL_E1000_ETHERNET_DEVICE(0x108B),
INTEL_E1000_ETHERNET_DEVICE(0x108C),
+ INTEL_E1000_ETHERNET_DEVICE(0x1099), /* 82546GB quad-port copper */
INTEL_E1000_ETHERNET_DEVICE(0x109A),
+ INTEL_E1000_ETHERNET_DEVICE(0x10B5), /* 82546GB quad-port copper (KSP3) */
/* required last entry */
{0,}
};
* next_to_use != next_to_clean */
for (i = 0; i < adapter->num_rx_queues; i++) {
struct e1000_rx_ring *ring = &adapter->rx_ring[i];
- adapter->alloc_rx_buf(adapter, ring, E1000_DESC_UNUSED(ring));
+ adapter->alloc_rx_buf(adapter, ring,
+ E1000_DESC_UNUSED(ring));
}
#ifdef CONFIG_PCI_MSI
case e1000_82546:
case e1000_82546_rev_3:
case e1000_82571:
- if((E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_FUNC_1)
- && (adapter->hw.media_type == e1000_media_type_copper)) {
+ if (E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_FUNC_1) {
e1000_read_eeprom(&adapter->hw,
EEPROM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
break;
if(eeprom_data & eeprom_apme_mask)
adapter->wol |= E1000_WUFC_MAG;
+ /* print bus type/speed/width info */
+ {
+ struct e1000_hw *hw = &adapter->hw;
+ DPRINTK(PROBE, INFO, "(PCI%s:%s:%s) ",
+ ((hw->bus_type == e1000_bus_type_pcix) ? "-X" :
+ (hw->bus_type == e1000_bus_type_pci_express ? " Express":"")),
+ ((hw->bus_speed == e1000_bus_speed_2500) ? "2.5Gb/s" :
+ (hw->bus_speed == e1000_bus_speed_133) ? "133MHz" :
+ (hw->bus_speed == e1000_bus_speed_120) ? "120MHz" :
+ (hw->bus_speed == e1000_bus_speed_100) ? "100MHz" :
+ (hw->bus_speed == e1000_bus_speed_66) ? "66MHz" : "33MHz"),
+ ((hw->bus_width == e1000_bus_width_64) ? "64-bit" :
+ (hw->bus_width == e1000_bus_width_pciex_4) ? "Width x4" :
+ (hw->bus_width == e1000_bus_width_pciex_1) ? "Width x1" :
+ "32-bit"));
+ }
+
+ for (i = 0; i < 6; i++)
+ printk("%2.2x%c", netdev->dev_addr[i], i == 5 ? '\n' : ':');
+
/* reset the hardware with the new settings */
e1000_reset(adapter);
{
uint32_t rctl, rfctl;
uint32_t psrctl = 0;
-#ifdef CONFIG_E1000_PACKET_SPLIT
+#ifndef CONFIG_E1000_DISABLE_PACKET_SPLIT
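+ /* packet split is now on by default; building with
+ * CONFIG_E1000_DISABLE_PACKET_SPLIT turns it off */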
uint32_t pages = 0;
#endif
}
}
-#ifdef CONFIG_E1000_PACKET_SPLIT
+#ifndef CONFIG_E1000_DISABLE_PACKET_SPLIT
/* 82571 and greater support packet-split where the protocol
* header is placed in skb->data and the packet data is
* placed in pages hanging off of skb_shinfo(skb)->nr_frags.
buffer_info->dma,
buffer_info->length,
PCI_DMA_TODEVICE);
- buffer_info->dma = 0;
}
- if(buffer_info->skb) {
+ if (buffer_info->skb)
dev_kfree_skb_any(buffer_info->skb);
- buffer_info->skb = NULL;
- }
+ memset(buffer_info, 0, sizeof(struct e1000_buffer));
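+ /* one memset clears skb, dma and length together */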
}
/**
for(i = 0; i < rx_ring->count; i++) {
buffer_info = &rx_ring->buffer_info[i];
if(buffer_info->skb) {
- ps_page = &rx_ring->ps_page[i];
- ps_page_dma = &rx_ring->ps_page_dma[i];
pci_unmap_single(pdev,
buffer_info->dma,
buffer_info->length,
if (++i == tx_ring->count) i = 0;
tx_ring->next_to_use = i;
- return 1;
+ return TRUE;
}
#endif
- return 0;
+ return FALSE;
}
static inline boolean_t
{
struct e1000_adapter *adapter;
int work_to_do = min(*budget, poll_dev->quota);
- int tx_cleaned, i = 0, work_done = 0;
+ int tx_cleaned = 0, i = 0, work_done = 0;
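+ /* tx_cleaned must start at zero: the trylock path below can
+ * legitimately skip tx cleanup for this poll */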
/* Must NOT use netdev_priv macro here. */
adapter = poll_dev->priv;
BUG();
}
- tx_cleaned = e1000_clean_tx_irq(adapter, &adapter->tx_ring[i]);
+ if (likely(adapter->num_tx_queues == 1)) {
+ /* e1000_clean is called per-cpu. This lock protects
+ * tx_ring[0] from being cleaned by multiple cpus
+ * simultaneously. A failure obtaining the lock means
+ * tx_ring[0] is currently being cleaned anyway. */
+ if (spin_trylock(&adapter->tx_queue_lock)) {
+ tx_cleaned = e1000_clean_tx_irq(adapter,
+ &adapter->tx_ring[0]);
+ spin_unlock(&adapter->tx_queue_lock);
+ }
+ } else {
+ tx_cleaned = e1000_clean_tx_irq(adapter, &adapter->tx_ring[i]);
+ }
+
adapter->clean_rx(adapter, &adapter->rx_ring[i],
&work_done, work_to_do);
buffer_info = &tx_ring->buffer_info[i];
cleaned = (i == eop);
+#ifdef CONFIG_E1000_MQ
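+ /* per-queue byte accounting used by the multiqueue statistics */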
+ tx_ring->tx_stats.bytes += buffer_info->length;
+#endif
e1000_unmap_and_free_tx_resource(adapter, buffer_info);
-
- tx_desc->buffer_addr = 0;
- tx_desc->lower.data = 0;
- tx_desc->upper.data = 0;
+ memset(tx_desc, 0, sizeof(struct e1000_tx_desc));
if(unlikely(++i == tx_ring->count)) i = 0;
}
uint32_t length;
uint8_t last_byte;
unsigned int i;
- boolean_t cleaned = FALSE;
int cleaned_count = 0;
+ boolean_t cleaned = FALSE, multi_descriptor = FALSE;
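+ /* multi_descriptor marks frames spanning several descriptors;
+ * the copybreak fast path below skips those */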
i = rx_ring->next_to_clean;
rx_desc = E1000_RX_DESC(*rx_ring, i);
while(rx_desc->status & E1000_RXD_STAT_DD) {
+ uint8_t status;
buffer_info = &rx_ring->buffer_info[i];
#ifdef CONFIG_E1000_NAPI
if(*work_done >= work_to_do)
break;
(*work_done)++;
#endif
-
+ status = rx_desc->status;
cleaned = TRUE;
cleaned_count++;
- pci_unmap_single(pdev, buffer_info->dma, buffer_info->length,
+ pci_unmap_single(pdev,
+ buffer_info->dma,
+ buffer_info->length,
PCI_DMA_FROMDEVICE);
skb = buffer_info->skb;
if(TBI_ACCEPT(&adapter->hw, rx_desc->status,
rx_desc->errors, length, last_byte)) {
spin_lock_irqsave(&adapter->stats_lock, flags);
- e1000_tbi_adjust_stats(&adapter->hw, &adapter->stats,
+ e1000_tbi_adjust_stats(&adapter->hw,
+ &adapter->stats,
length, skb->data);
spin_unlock_irqrestore(&adapter->stats_lock,
flags);
}
}
- /* Good Receive */
- skb_put(skb, length - ETHERNET_FCS_SIZE);
+ /* adjust length to remove the Ethernet CRC before the
+ * data is copied or handed to the stack */
+ length -= ETHERNET_FCS_SIZE;
+
+ /* code added for copybreak, this should improve
+ * performance for small packets with large amounts
+ * of reassembly being done in the stack */
+#define E1000_CB_LENGTH 256
+ if ((length < E1000_CB_LENGTH) &&
+ !rx_ring->rx_skb_top &&
+ /* or maybe (status & E1000_RXD_STAT_EOP) && */
+ !multi_descriptor) {
+ struct sk_buff *new_skb =
+ dev_alloc_skb(length + NET_IP_ALIGN);
+ if (new_skb) {
+ skb_reserve(new_skb, NET_IP_ALIGN);
+ new_skb->dev = netdev;
+ memcpy(new_skb->data - NET_IP_ALIGN,
+ skb->data - NET_IP_ALIGN,
+ length + NET_IP_ALIGN);
+ /* save the skb in buffer_info as good */
+ buffer_info->skb = skb;
+ skb = new_skb;
+ }
+ /* else just continue with the old skb */
+ }
+ /* end copybreak code */
+
+ /* Good Receive: set the final data length on whichever
+ * skb is being passed up */
+ skb_put(skb, length);
/* Receive Checksum Offload */
- e1000_rx_checksum(adapter, (uint32_t)(rx_desc->status) |
+ e1000_rx_checksum(adapter,
+ (uint32_t)(status) |
((uint32_t)(rx_desc->errors) << 24),
rx_desc->csum, skb);
skb->protocol = eth_type_trans(skb, netdev);
#ifdef CONFIG_E1000_NAPI
if(unlikely(adapter->vlgrp &&
- (rx_desc->status & E1000_RXD_STAT_VP))) {
+ (status & E1000_RXD_STAT_VP))) {
vlan_hwaccel_receive_skb(skb, adapter->vlgrp,
le16_to_cpu(rx_desc->special) &
E1000_RXD_SPC_VLAN_MASK);
static void
e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
struct e1000_rx_ring *rx_ring,
- int cleaned_count)
+ int cleaned_count)
{
struct net_device *netdev = adapter->netdev;
struct pci_dev *pdev = adapter->pdev;
i = rx_ring->next_to_use;
buffer_info = &rx_ring->buffer_info[i];
- while(!buffer_info->skb) {
- skb = dev_alloc_skb(bufsz);
+ while (cleaned_count--) {
+ if (!(skb = buffer_info->skb))
+ skb = dev_alloc_skb(bufsz);
+ else {
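+ /* recycle an skb left in place by a previous receive (e.g. the
+ * copybreak path): reset its length and go straight to mapping */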
+ skb_trim(skb, 0);
+ goto map_skb;
+ }
+
if(unlikely(!skb)) {
/* Better luck next round */
buffer_info->skb = skb;
buffer_info->length = adapter->rx_buffer_len;
+map_skb:
buffer_info->dma = pci_map_single(pdev,
skb->data,
adapter->rx_buffer_len,
struct e1000_adapter *adapter = netdev_priv(netdev);
uint32_t ctrl, ctrl_ext, rctl, manc, status;
uint32_t wufc = adapter->wol;
+ int retval = 0;
netif_device_detach(netdev);
E1000_WRITE_REG(&adapter->hw, WUC, E1000_WUC_PME_EN);
E1000_WRITE_REG(&adapter->hw, WUFC, wufc);
- pci_enable_wake(pdev, 3, 1);
- pci_enable_wake(pdev, 4, 1); /* 4 == D3 cold */
+ retval = pci_enable_wake(pdev, PCI_D3hot, 1);
+ if (retval)
+ DPRINTK(PROBE, ERR, "Error enabling D3 wake\n");
+ retval = pci_enable_wake(pdev, PCI_D3cold, 1);
+ if (retval)
+ DPRINTK(PROBE, ERR, "Error enabling D3 cold wake\n");
} else {
E1000_WRITE_REG(&adapter->hw, WUC, 0);
E1000_WRITE_REG(&adapter->hw, WUFC, 0);
- pci_enable_wake(pdev, 3, 0);
- pci_enable_wake(pdev, 4, 0); /* 4 == D3 cold */
+ retval = pci_enable_wake(pdev, PCI_D3hot, 0);
+ if (retval)
+ DPRINTK(PROBE, ERR, "Error disabling D3 wake\n");
+ retval = pci_enable_wake(pdev, PCI_D3cold, 0);
+ if (retval)
+ DPRINTK(PROBE, ERR, "Error disabling D3 cold wake\n");
}
pci_save_state(pdev);
if(manc & E1000_MANC_SMBUS_EN) {
manc |= E1000_MANC_ARP_EN;
E1000_WRITE_REG(&adapter->hw, MANC, manc);
- pci_enable_wake(pdev, 3, 1);
- pci_enable_wake(pdev, 4, 1); /* 4 == D3 cold */
+ retval = pci_enable_wake(pdev, PCI_D3hot, 1);
+ if (retval)
+ DPRINTK(PROBE, ERR, "Error enabling D3 wake\n");
+ retval = pci_enable_wake(pdev, PCI_D3cold, 1);
+ if (retval)
+ DPRINTK(PROBE, ERR, "Error enabling D3 cold wake\n");
}
}
e1000_release_hw_control(adapter);
pci_disable_device(pdev);
- pci_set_power_state(pdev, pci_choose_state(pdev, state));
+
+ retval = pci_set_power_state(pdev, pci_choose_state(pdev, state));
+ if (retval)
+ DPRINTK(PROBE, ERR, "Error in setting power state\n");
return 0;
}
{
struct net_device *netdev = pci_get_drvdata(pdev);
struct e1000_adapter *adapter = netdev_priv(netdev);
+ int retval;
uint32_t manc, ret_val;
- pci_set_power_state(pdev, PCI_D0);
+ retval = pci_set_power_state(pdev, PCI_D0);
+ if (retval)
+ DPRINTK(PROBE, ERR, "Error in setting power state\n");
pci_restore_state(pdev);
ret_val = pci_enable_device(pdev);
pci_set_master(pdev);
- pci_enable_wake(pdev, PCI_D3hot, 0);
- pci_enable_wake(pdev, PCI_D3cold, 0);
+ retval = pci_enable_wake(pdev, PCI_D3hot, 0);
+ if (retval)
+ DPRINTK(PROBE, ERR, "Error disabling D3 wake\n");
+ retval = pci_enable_wake(pdev, PCI_D3cold, 0);
+ if (retval)
+ DPRINTK(PROBE, ERR, "Error disabling D3 cold wake\n");
e1000_reset(adapter);
E1000_WRITE_REG(&adapter->hw, WUS, ~0);