patch-2.3.43 linux/drivers/net/acenic.c
- Lines: 924
- Date: Thu Feb 10 12:22:03 2000
- Orig file: v2.3.42/linux/drivers/net/acenic.c
- Orig date: Thu Nov 11 20:11:39 1999
diff -u --recursive --new-file v2.3.42/linux/drivers/net/acenic.c linux/drivers/net/acenic.c
@@ -20,11 +20,13 @@
* Additional work by Pete Wyckoff <wyckoff@ca.sandia.gov> for initial
* Alpha and trace dump support. The trace dump support has not been
* integrated yet however.
+ *
+ * Big-endian+Sparc fixes and conversion to new PCI dma mapping
+ * infrastructure by David S. Miller <davem@redhat.com>.
*/
#include <linux/config.h>
#include <linux/module.h>
-#include <linux/version.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/ioport.h>
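
The comment added above names the two themes of this patch: endianness fixes and the move from virt_to_bus() to the PCI DMA mapping interface. As a rough sketch only (hypothetical helper name; the three-argument pci_map_single()/pci_unmap_single() match this 2.3.43-era API, later kernels add a direction argument), the two halves of that interface pair up like this:

#include <linux/pci.h>
#include <linux/skbuff.h>

static int example_dma_usage(struct pci_dev *pdev, struct sk_buff *skb)
{
	dma_addr_t ring_dma, buf_dma;
	void *ring;

	/* Consistent memory: long-lived, coherent ring storage. */
	ring = pci_alloc_consistent(pdev, 4096, &ring_dma);
	if (ring == NULL)
		return 1;

	/* Streaming mapping: one packet buffer, owned by the NIC
	 * until it is unmapped again. */
	buf_dma = pci_map_single(pdev, skb->data, skb->len);

	/* ... hand ring_dma/buf_dma to the NIC and do the I/O ... */

	pci_unmap_single(pdev, buf_dma, skb->len);
	pci_free_consistent(pdev, 4096, ring, ring_dma);
	return 0;
}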
@@ -83,10 +85,6 @@
#define wmb() mb()
#endif
-#if (LINUX_VERSION_CODE < 0x02030e)
-#define net_device device
-#endif
-
#include "acenic.h"
/*
@@ -294,8 +292,103 @@
static int probed __initdata = 0;
+void ace_free_descriptors(struct net_device *dev)
+{
+ struct ace_private *ap = dev->priv;
+ int size;
+
+ if (ap->rx_std_ring != NULL) {
+ size = (sizeof(struct rx_desc) *
+ (RX_STD_RING_ENTRIES +
+ RX_JUMBO_RING_ENTRIES +
+ RX_MINI_RING_ENTRIES +
+ RX_RETURN_RING_ENTRIES));
+ pci_free_consistent(ap->pdev, size,
+ ap->rx_std_ring,
+ ap->rx_ring_base_dma);
+ ap->rx_std_ring = NULL;
+ ap->rx_jumbo_ring = NULL;
+ ap->rx_mini_ring = NULL;
+ ap->rx_return_ring = NULL;
+ }
+ if (ap->evt_ring != NULL) {
+ size = (sizeof(struct event) * EVT_RING_ENTRIES);
+ pci_free_consistent(ap->pdev, size,
+ ap->evt_ring,
+ ap->evt_ring_dma);
+ ap->evt_ring = NULL;
+ }
+ if (ap->evt_prd != NULL) {
+ pci_free_consistent(ap->pdev, sizeof(u32),
+ (void *)ap->evt_prd, ap->evt_prd_dma);
+ ap->evt_prd = NULL;
+ }
+ if (ap->rx_ret_prd != NULL) {
+ pci_free_consistent(ap->pdev, sizeof(u32),
+ (void *)ap->rx_ret_prd, ap->rx_ret_prd_dma);
+ ap->rx_ret_prd = NULL;
+ }
+ if (ap->tx_csm != NULL) {
+ pci_free_consistent(ap->pdev, sizeof(u32),
+ (void *)ap->tx_csm, ap->tx_csm_dma);
+ ap->tx_csm = NULL;
+ }
+}
+
+int ace_allocate_descriptors(struct net_device *dev)
+{
+ struct ace_private *ap = dev->priv;
+ int size;
+
+ size = (sizeof(struct rx_desc) *
+ (RX_STD_RING_ENTRIES +
+ RX_JUMBO_RING_ENTRIES +
+ RX_MINI_RING_ENTRIES +
+ RX_RETURN_RING_ENTRIES));
+
+ ap->rx_std_ring = pci_alloc_consistent(ap->pdev, size,
+ &ap->rx_ring_base_dma);
+ if (ap->rx_std_ring == NULL)
+ goto fail;
+
+ ap->rx_jumbo_ring = ap->rx_std_ring + RX_STD_RING_ENTRIES;
+ ap->rx_mini_ring = ap->rx_jumbo_ring + RX_JUMBO_RING_ENTRIES;
+ ap->rx_return_ring = ap->rx_mini_ring + RX_MINI_RING_ENTRIES;
+
+ size = (sizeof(struct event) * EVT_RING_ENTRIES);
+
+ ap->evt_ring = pci_alloc_consistent(ap->pdev, size,
+ &ap->evt_ring_dma);
+
+ if (ap->evt_ring == NULL)
+ goto fail;
+
+ ap->evt_prd = pci_alloc_consistent(ap->pdev, sizeof(u32),
+ &ap->evt_prd_dma);
+ if (ap->evt_prd == NULL)
+ goto fail;
+
+ ap->rx_ret_prd = pci_alloc_consistent(ap->pdev, sizeof(u32),
+ &ap->rx_ret_prd_dma);
+ if (ap->rx_ret_prd == NULL)
+ goto fail;
+
+ ap->tx_csm = pci_alloc_consistent(ap->pdev, sizeof(u32),
+ &ap->tx_csm_dma);
+ if (ap->tx_csm == NULL)
+ goto fail;
+
+ return 0;
+
+fail:
+ /* Clean up. */
+ ace_free_descriptors(dev);
+ iounmap(ap->regs);
+ unregister_netdev(dev);
+ return 1;
+}
-int __init acenic_probe(void)
+static int __init acenic_probe(void)
{
int boards_found = 0;
int version_disp;
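
ace_allocate_descriptors() above carves all four RX rings out of one consistent block, which is why ace_free_descriptors() only ever frees the rx_std_ring base pointer. A hedged sketch of the address arithmetic (hypothetical helper; struct and field names follow acenic.h as changed by this patch):

#include <linux/pci.h>
#include "acenic.h"

/* Bus address of a sub-ring inside the single consistent block.
 * The CPU-side pointers advance by the same entry counts, so the
 * CPU and NIC views stay in step. */
static dma_addr_t example_subring_dma(struct ace_private *ap,
				      int entries_before)
{
	return ap->rx_ring_base_dma +
		sizeof(struct rx_desc) * entries_before;
}

For instance, the jumbo ring sits RX_STD_RING_ENTRIES entries in, which matches the rx_jumbo_ctrl.rngptr computation in a later hunk.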
@@ -380,16 +473,16 @@
pci_set_master(pdev);
+#ifdef __sparc__
+ /* NOTE: Cache line size is in 32-bit word units. */
+ pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, 0x10);
+#endif
/*
* Remap the regs into kernel space - this is abuse of
* dev->base_addr since it was meant for I/O port
* addresses but who gives a damn.
*/
-#if (LINUX_VERSION_CODE < 0x02030d)
- dev->base_addr = pdev->base_address[0];
-#else
dev->base_addr = pdev->resource[0].start;
-#endif
ap->regs = (struct ace_regs *)ioremap(dev->base_addr, 0x4000);
if (!ap->regs){
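
The SPARC-only hunk above programs the PCI cache line size register; since PCI_CACHE_LINE_SIZE counts 32-bit words, 0x10 words * 4 bytes sets a 64-byte line. A minimal sketch of the same computation (hypothetical helper):

#include <linux/pci.h>

/* Program the cache line size register from a byte count; the
 * register itself is in 32-bit word units. */
static void example_set_cacheline(struct pci_dev *pdev, int bytes)
{
	pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, bytes / 4);
}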
@@ -442,6 +535,9 @@
}
#endif
+ if (ace_allocate_descriptors(dev))
+ continue;
+
#ifdef MODULE
if (ace_init(dev, boards_found))
continue;
@@ -470,8 +566,6 @@
}
-#ifdef MODULE
-#if LINUX_VERSION_CODE > 0x20118
MODULE_AUTHOR("Jes Sorensen <Jes.Sorensen@cern.ch>");
MODULE_DESCRIPTION("AceNIC/3C985 Gigabit Ethernet driver");
MODULE_PARM(link, "1-" __MODULE_STRING(8) "i");
@@ -480,10 +574,9 @@
MODULE_PARM(max_tx_desc, "1-" __MODULE_STRING(8) "i");
MODULE_PARM(rx_coal_tick, "1-" __MODULE_STRING(8) "i");
MODULE_PARM(max_rx_desc, "1-" __MODULE_STRING(8) "i");
-#endif
-int init_module(void)
+static int __init acenic_init_module (void)
{
int cards;
@@ -494,7 +587,7 @@
}
-void cleanup_module(void)
+static void __exit acenic_cleanup_module (void)
{
struct ace_private *ap;
struct ace_regs *regs;
@@ -528,26 +621,46 @@
synchronize_irq();
for (i = 0; i < RX_STD_RING_ENTRIES; i++) {
- if (ap->skb->rx_std_skbuff[i]) {
+ struct sk_buff *skb = ap->skb->rx_std_skbuff[i].skb;
+
+ if (skb) {
+ dma_addr_t mapping;
+
+ mapping = ap->skb->rx_std_skbuff[i].mapping;
+
ap->rx_std_ring[i].size = 0;
- set_aceaddr_bus(&ap->rx_std_ring[i].addr, 0);
- dev_kfree_skb(ap->skb->rx_std_skbuff[i]);
+ set_aceaddr(&ap->rx_std_ring[i].addr, 0);
+ pci_unmap_single(ap->pdev, mapping,
+ ACE_STD_BUFSIZE - (2 + 16));
+ dev_kfree_skb(skb);
+ ap->skb->rx_std_skbuff[i].skb = NULL;
}
}
if (ap->version >= 2) {
for (i = 0; i < RX_MINI_RING_ENTRIES; i++) {
- if (ap->skb->rx_mini_skbuff[i]) {
+ struct sk_buff *skb = ap->skb->rx_mini_skbuff[i].skb;
+
+ if (skb) {
+ dma_addr_t mapping;
+
+ mapping = ap->skb->rx_mini_skbuff[i].mapping;
+
ap->rx_mini_ring[i].size = 0;
- set_aceaddr_bus(&ap->rx_mini_ring[i].addr, 0);
- dev_kfree_skb(ap->skb->rx_mini_skbuff[i]);
+ set_aceaddr(&ap->rx_mini_ring[i].addr, 0);
+ pci_unmap_single(ap->pdev, mapping,
+ ACE_MINI_BUFSIZE - (2 + 16));
+ dev_kfree_skb(skb);
}
}
}
+ ace_free_descriptors(root_dev);
+
iounmap(regs);
if(ap->trace_buf)
kfree(ap->trace_buf);
- kfree(ap->info);
+ pci_free_consistent(ap->pdev, sizeof(struct ace_info),
+ ap->info, ap->info_dma);
kfree(ap->skb);
free_irq(root_dev->irq, root_dev);
unregister_netdev(root_dev);
@@ -556,7 +669,9 @@
root_dev = next;
}
}
-#endif
+
+module_init(acenic_init_module);
+module_exit(acenic_cleanup_module);
/*
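
The #ifdef MODULE init_module()/cleanup_module() pair becomes static __init/__exit functions registered through module_init()/module_exit(), which behave correctly whether the driver is modular or built in. A minimal sketch of the same pattern, with hypothetical names:

#include <linux/module.h>
#include <linux/init.h>

static int __init example_init(void)
{
	return 0;	/* nonzero would fail the load */
}

static void __exit example_exit(void)
{
	/* undo whatever example_init() set up */
}

module_init(example_init);
module_exit(example_exit);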
@@ -590,14 +705,8 @@
/*
* Don't access any other registers before this point!
*/
-#ifdef __BIG_ENDIAN
- writel(((BYTE_SWAP | WORD_SWAP | CLR_INT) |
- ((BYTE_SWAP | WORD_SWAP | CLR_INT) << 24)),
- &regs->HostCtrl);
-#else
writel((CLR_INT | WORD_SWAP | ((CLR_INT | WORD_SWAP) << 24)),
&regs->HostCtrl);
-#endif
mb();
/*
@@ -640,12 +749,8 @@
* value a second time works as well. This is what caused the
* `Firmware not running' problem on the Tigon II.
*/
-#ifdef __LITTLE_ENDIAN
writel(ACE_BYTE_SWAP_DATA | ACE_WARN | ACE_FATAL |
ACE_WORD_SWAP | ACE_NO_JUMBO_FRAG, &regs->ModeStat);
-#else
-#error "this driver doesn't run on big-endian machines yet!"
-#endif
mac1 = 0;
for(i = 0; i < 4; i++){
@@ -731,19 +836,23 @@
* and the control blocks for the transmit and receive rings
* as they need to be setup once and for all.
*/
- if (!(info = kmalloc(sizeof(struct ace_info), GFP_KERNEL)))
- return -EAGAIN;
+ info = pci_alloc_consistent(ap->pdev, sizeof(struct ace_info),
+ &ap->info_dma);
+ if (info == NULL)
+ goto fail;
/*
* Get the memory for the skb rings.
*/
if (!(ap->skb = kmalloc(sizeof(struct ace_skb), GFP_KERNEL)))
- return -EAGAIN;
+ goto fail;
+
+ memset(ap->skb, 0, sizeof(struct ace_skb));
if (request_irq(dev->irq, ace_interrupt, SA_SHIRQ, ap->name, dev)) {
printk(KERN_WARNING "%s: Requested IRQ %d is busy\n",
dev->name, dev->irq);
- return -EAGAIN;
+ goto fail;
}
/*
@@ -754,13 +863,11 @@
root_dev = dev;
ap->info = info;
- memset(info, 0, sizeof(struct ace_info));
- memset(ap->skb, 0, sizeof(struct ace_skb));
ace_load_firmware(dev);
ap->fw_running = 0;
- tmp_ptr = virt_to_bus((void *)info);
+ tmp_ptr = (unsigned long) ap->info_dma;
#if (BITS_PER_LONG == 64)
writel(tmp_ptr >> 32, &regs->InfoPtrHi);
#else
@@ -770,15 +877,15 @@
memset(ap->evt_ring, 0, EVT_RING_ENTRIES * sizeof(struct event));
- set_aceaddr(&info->evt_ctrl.rngptr, ap->evt_ring);
+ set_aceaddr(&info->evt_ctrl.rngptr, ap->evt_ring_dma);
info->evt_ctrl.flags = 0;
- set_aceaddr(&info->evt_prd_ptr, &ap->evt_prd);
- ap->evt_prd = 0;
+ set_aceaddr(&info->evt_prd_ptr, ap->evt_prd_dma);
+ *(ap->evt_prd) = 0;
wmb();
writel(0, &regs->EvtCsm);
- set_aceaddr_bus(&info->cmd_ctrl.rngptr, (void *)0x100);
+ set_aceaddr(&info->cmd_ctrl.rngptr, 0x100);
info->cmd_ctrl.flags = 0;
info->cmd_ctrl.max_len = 0;
@@ -788,30 +895,35 @@
writel(0, &regs->CmdPrd);
writel(0, &regs->CmdCsm);
- set_aceaddr(&info->stats2_ptr, &info->s.stats);
-
- set_aceaddr(&info->rx_std_ctrl.rngptr, ap->rx_std_ring);
- info->rx_std_ctrl.max_len = ACE_STD_MTU + ETH_HLEN + 4;
- info->rx_std_ctrl.flags = RCB_FLG_TCP_UDP_SUM;
+ tmp_ptr = ap->info_dma;
+ tmp_ptr += (unsigned long) &(((struct ace_info *)0)->s.stats);
+ set_aceaddr(&info->stats2_ptr, (dma_addr_t) tmp_ptr);
+
+ set_aceaddr(&info->rx_std_ctrl.rngptr, ap->rx_ring_base_dma);
+ info->rx_std_ctrl.max_len = cpu_to_le16(ACE_STD_MTU + ETH_HLEN + 4);
+ info->rx_std_ctrl.flags = cpu_to_le16(RCB_FLG_TCP_UDP_SUM);
memset(ap->rx_std_ring, 0,
RX_STD_RING_ENTRIES * sizeof(struct rx_desc));
for (i = 0; i < RX_STD_RING_ENTRIES; i++)
- ap->rx_std_ring[i].flags = BD_FLG_TCP_UDP_SUM;
+ ap->rx_std_ring[i].flags = cpu_to_le16(BD_FLG_TCP_UDP_SUM);
ap->rx_std_skbprd = 0;
atomic_set(&ap->cur_rx_bufs, 0);
- set_aceaddr(&info->rx_jumbo_ctrl.rngptr, ap->rx_jumbo_ring);
+ set_aceaddr(&info->rx_jumbo_ctrl.rngptr,
+ (ap->rx_ring_base_dma +
+ (sizeof(struct rx_desc) * RX_STD_RING_ENTRIES)));
info->rx_jumbo_ctrl.max_len = 0;
- info->rx_jumbo_ctrl.flags = RCB_FLG_TCP_UDP_SUM;
+ info->rx_jumbo_ctrl.flags = cpu_to_le16(RCB_FLG_TCP_UDP_SUM);
memset(ap->rx_jumbo_ring, 0,
RX_JUMBO_RING_ENTRIES * sizeof(struct rx_desc));
for (i = 0; i < RX_JUMBO_RING_ENTRIES; i++)
- ap->rx_jumbo_ring[i].flags = BD_FLG_TCP_UDP_SUM | BD_FLG_JUMBO;
+ ap->rx_jumbo_ring[i].flags =
+ cpu_to_le16(BD_FLG_TCP_UDP_SUM | BD_FLG_JUMBO);
ap->rx_jumbo_skbprd = 0;
atomic_set(&ap->cur_jumbo_bufs, 0);
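
From here on, every 16-bit field the host hands to the Tigon goes through cpu_to_le16(), since the NIC reads its control blocks and descriptors as little-endian data; the conversion is a no-op on x86 and a byte swap on SPARC. The stats2_ptr lines apply the same thinking to addresses, adding the offset of the embedded stats member within struct ace_info to the block's bus handle. A minimal sketch of the byte-order half (hypothetical descriptor layout):

#include <linux/types.h>
#include <asm/byteorder.h>

/* Fields the NIC reads must be stored little-endian regardless of
 * host byte order. */
struct example_desc {
	u16 size;
	u16 flags;
};

static void example_fill(struct example_desc *d, u16 len, u16 flags)
{
	d->size  = cpu_to_le16(len);	/* swapped on big-endian hosts */
	d->flags = cpu_to_le16(flags);
	/* host reads go the other way: le16_to_cpu(d->size) */
}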
@@ -820,30 +932,40 @@
RX_MINI_RING_ENTRIES * sizeof(struct rx_desc));
if (ap->version >= 2) {
- set_aceaddr(&info->rx_mini_ctrl.rngptr, ap->rx_mini_ring);
- info->rx_mini_ctrl.max_len = ACE_MINI_SIZE;
- info->rx_mini_ctrl.flags = RCB_FLG_TCP_UDP_SUM;
+ set_aceaddr(&info->rx_mini_ctrl.rngptr,
+ (ap->rx_ring_base_dma +
+ (sizeof(struct rx_desc) *
+ (RX_STD_RING_ENTRIES +
+ RX_JUMBO_RING_ENTRIES))));
+ info->rx_mini_ctrl.max_len = cpu_to_le16(ACE_MINI_SIZE);
+ info->rx_mini_ctrl.flags = cpu_to_le16(RCB_FLG_TCP_UDP_SUM);
for (i = 0; i < RX_MINI_RING_ENTRIES; i++)
ap->rx_mini_ring[i].flags =
- BD_FLG_TCP_UDP_SUM | BD_FLG_MINI;
+ cpu_to_le16(BD_FLG_TCP_UDP_SUM | BD_FLG_MINI);
} else {
set_aceaddr(&info->rx_mini_ctrl.rngptr, 0);
- info->rx_mini_ctrl.flags = RCB_FLG_RNG_DISABLE;
+ info->rx_mini_ctrl.flags = cpu_to_le16(RCB_FLG_RNG_DISABLE);
info->rx_mini_ctrl.max_len = 0;
}
ap->rx_mini_skbprd = 0;
atomic_set(&ap->cur_mini_bufs, 0);
- set_aceaddr(&info->rx_return_ctrl.rngptr, ap->rx_return_ring);
+ set_aceaddr(&info->rx_return_ctrl.rngptr,
+ (ap->rx_ring_base_dma +
+ (sizeof(struct rx_desc) *
+ (RX_STD_RING_ENTRIES +
+ RX_JUMBO_RING_ENTRIES +
+ RX_MINI_RING_ENTRIES))));
info->rx_return_ctrl.flags = 0;
- info->rx_return_ctrl.max_len = RX_RETURN_RING_ENTRIES;
+ info->rx_return_ctrl.max_len = cpu_to_le16(RX_RETURN_RING_ENTRIES);
memset(ap->rx_return_ring, 0,
RX_RETURN_RING_ENTRIES * sizeof(struct rx_desc));
- set_aceaddr(&info->rx_ret_prd_ptr, &ap->rx_ret_prd);
+ set_aceaddr(&info->rx_ret_prd_ptr, ap->rx_ret_prd_dma);
+ *(ap->rx_ret_prd) = 0;
writel(TX_RING_BASE, &regs->WinBase);
ap->tx_ring = (struct tx_desc *)regs->Window;
@@ -851,15 +973,15 @@
writel(0, (unsigned long)ap->tx_ring + i * 4);
}
- set_aceaddr_bus(&info->tx_ctrl.rngptr, (void *)TX_RING_BASE);
- info->tx_ctrl.max_len = TX_RING_ENTRIES;
+ set_aceaddr(&info->tx_ctrl.rngptr, TX_RING_BASE);
+ info->tx_ctrl.max_len = cpu_to_le16(TX_RING_ENTRIES);
#if TX_COAL_INTS_ONLY
- info->tx_ctrl.flags = RCB_FLG_COAL_INT_ONLY;
+ info->tx_ctrl.flags = cpu_to_le16(RCB_FLG_COAL_INT_ONLY);
#else
info->tx_ctrl.flags = 0;
#endif
- set_aceaddr(&info->tx_csm_ptr, &ap->tx_csm);
+ set_aceaddr(&info->tx_csm_ptr, ap->tx_csm_dma);
/*
* Potential item for tuning parameter
@@ -975,7 +1097,7 @@
*/
ap->tx_full = 0;
ap->cur_rx = 0;
- ap->tx_prd = ap->tx_csm = ap->tx_ret_csm = 0;
+ ap->tx_prd = *(ap->tx_csm) = ap->tx_ret_csm = 0;
wmb();
writel(0, &regs->TxPrd);
@@ -1015,6 +1137,17 @@
"the RX mini ring\n", dev->name);
}
return 0;
+
+fail:
+ if (info != NULL)
+ pci_free_consistent(ap->pdev, sizeof(struct ace_info),
+ info, ap->info_dma);
+ if (ap->skb != NULL) {
+ kfree(ap->skb);
+ ap->skb = NULL;
+ }
+
+ return -EAGAIN;
}
@@ -1032,7 +1165,7 @@
* seconds and there is data in the transmit queue, thus we
* asume the card is stuck.
*/
- if (ap->tx_csm != ap->tx_ret_csm){
+ if (le32_to_cpu(*(ap->tx_csm)) != ap->tx_ret_csm){
printk(KERN_WARNING "%s: Transmitter is stuck, %08x\n",
dev->name, (unsigned int)readl(&regs->HostCtrl));
}
@@ -1112,18 +1245,22 @@
for (i = 0; i < nr_bufs; i++) {
struct sk_buff *skb;
struct rx_desc *rd;
+ dma_addr_t mapping;
skb = alloc_skb(ACE_STD_BUFSIZE, GFP_ATOMIC);
/*
* Make sure IP header starts on a fresh cache line.
*/
skb_reserve(skb, 2 + 16);
- ap->skb->rx_std_skbuff[idx] = skb;
+ mapping = pci_map_single(ap->pdev, skb->data,
+ ACE_STD_BUFSIZE - (2 + 16));
+ ap->skb->rx_std_skbuff[idx].skb = skb;
+ ap->skb->rx_std_skbuff[idx].mapping = mapping;
rd = &ap->rx_std_ring[idx];
- set_aceaddr(&rd->addr, skb->data);
- rd->size = ACE_STD_MTU + ETH_HLEN + 4;
- rd->idx = idx;
+ set_aceaddr(&rd->addr, mapping);
+ rd->size = cpu_to_le16(ACE_STD_MTU + ETH_HLEN + 4);
+ rd->idx = cpu_to_le16(idx);
idx = (idx + 1) % RX_STD_RING_ENTRIES;
}
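
Each receive buffer now keeps its DMA handle next to the skb (the ring_info pairs this patch introduces in acenic.h), because pci_unmap_single() must later be called with exactly the handle and length that pci_map_single() returned. A hedged sketch of the refill step (hypothetical names; sizes follow the patch):

#include <linux/pci.h>
#include <linux/skbuff.h>

struct example_ring_info {
	struct sk_buff *skb;
	dma_addr_t mapping;
};

static void example_refill(struct pci_dev *pdev,
			   struct example_ring_info *ri, int bufsize)
{
	struct sk_buff *skb = alloc_skb(bufsize, GFP_ATOMIC);

	if (skb == NULL)
		return;
	skb_reserve(skb, 2 + 16);	/* fresh cache line for IP header */
	ri->skb = skb;
	ri->mapping = pci_map_single(pdev, skb->data,
				     bufsize - (2 + 16));
}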
@@ -1157,18 +1294,22 @@
for (i = 0; i < nr_bufs; i++) {
struct sk_buff *skb;
struct rx_desc *rd;
+ dma_addr_t mapping;
skb = alloc_skb(ACE_MINI_BUFSIZE, GFP_ATOMIC);
/*
* Make sure the IP header ends up on a fresh cache line
*/
skb_reserve(skb, 2 + 16);
- ap->skb->rx_mini_skbuff[idx] = skb;
+ mapping = pci_map_single(ap->pdev, skb->data,
+ ACE_MINI_BUFSIZE - (2 + 16));
+ ap->skb->rx_mini_skbuff[idx].skb = skb;
+ ap->skb->rx_mini_skbuff[idx].mapping = mapping;
rd = &ap->rx_mini_ring[idx];
- set_aceaddr(&rd->addr, skb->data);
- rd->size = ACE_MINI_SIZE;
- rd->idx = idx;
+ set_aceaddr(&rd->addr, mapping);
+ rd->size = cpu_to_le16(ACE_MINI_SIZE);
+ rd->idx = cpu_to_le16(idx);
idx = (idx + 1) % RX_MINI_RING_ENTRIES;
}
@@ -1200,18 +1341,22 @@
for (i = 0; i < nr_bufs; i++) {
struct sk_buff *skb;
struct rx_desc *rd;
+ dma_addr_t mapping;
skb = alloc_skb(ACE_JUMBO_BUFSIZE, GFP_ATOMIC);
/*
* Make sure the IP header ends up on a fresh cache line
*/
skb_reserve(skb, 2 + 16);
- ap->skb->rx_jumbo_skbuff[idx] = skb;
+ mapping = pci_map_single(ap->pdev, skb->data,
+ ACE_JUMBO_BUFSIZE - (2 + 16));
+ ap->skb->rx_jumbo_skbuff[idx].skb = skb;
+ ap->skb->rx_jumbo_skbuff[idx].mapping = mapping;
rd = &ap->rx_jumbo_ring[idx];
- set_aceaddr(&rd->addr, skb->data);
- rd->size = ACE_JUMBO_MTU + ETH_HLEN + 4;
- rd->idx = idx;
+ set_aceaddr(&rd->addr, mapping);
+ rd->size = cpu_to_le16(ACE_JUMBO_MTU + ETH_HLEN + 4);
+ rd->idx = cpu_to_le16(idx);
idx = (idx + 1) % RX_JUMBO_RING_ENTRIES;
}
@@ -1254,10 +1399,20 @@
ace_issue_cmd(regs, &cmd);
for (i = 0; i < RX_JUMBO_RING_ENTRIES; i++) {
- if (ap->skb->rx_jumbo_skbuff[i]) {
+ struct sk_buff *skb;
+
+ skb = ap->skb->rx_jumbo_skbuff[i].skb;
+ if (skb) {
+ dma_addr_t mapping;
+
+ mapping = ap->skb->rx_jumbo_skbuff[i].mapping;
+
ap->rx_jumbo_ring[i].size = 0;
- set_aceaddr_bus(&ap->rx_jumbo_ring[i].addr, 0);
- dev_kfree_skb(ap->skb->rx_jumbo_skbuff[i]);
+ set_aceaddr(&ap->rx_jumbo_ring[i].addr, 0);
+ pci_unmap_single(ap->pdev, mapping,
+ ACE_JUMBO_BUFSIZE - (2 + 16));
+ dev_kfree_skb(skb);
+ ap->skb->rx_jumbo_skbuff[i].skb = NULL;
}
}
}else
@@ -1280,7 +1435,11 @@
ap = (struct ace_private *)dev->priv;
while (evtcsm != evtprd){
- switch (ap->evt_ring[evtcsm].evt){
+ struct event evt_local;
+
+ memcpy(&evt_local, &ap->evt_ring[evtcsm], sizeof(evt_local));
+ evt_local.u.word = le32_to_cpu(evt_local.u.word);
+ switch (evt_local.u.data.evt){
case E_FW_RUNNING:
printk(KERN_INFO "%s: Firmware up and running\n",
dev->name);
@@ -1290,7 +1449,7 @@
break;
case E_LNK_STATE:
{
- u16 code = ap->evt_ring[evtcsm].code;
+ u16 code = evt_local.u.data.code;
if (code == E_C_LINK_UP){
printk(KERN_WARNING "%s: Optical link UP\n",
dev->name);
@@ -1304,7 +1463,7 @@
break;
}
case E_ERROR:
- switch(ap->evt_ring[evtcsm].code){
+ switch(evt_local.u.data.code){
case E_C_ERR_INVAL_CMD:
printk(KERN_ERR "%s: invalid command error\n",
dev->name);
@@ -1319,14 +1478,14 @@
break;
default:
printk(KERN_ERR "%s: unknown error %02x\n",
- dev->name, ap->evt_ring[evtcsm].code);
+ dev->name, evt_local.u.data.code);
}
break;
case E_RESET_JUMBO_RNG:
break;
default:
printk(KERN_ERR "%s: Unhandled event 0x%02x\n",
- dev->name, ap->evt_ring[evtcsm].evt);
+ dev->name, evt_local.u.data.evt);
}
evtcsm = (evtcsm + 1) % EVT_RING_ENTRIES;
}
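
Events arrive as a little-endian 32-bit word of packed bit-fields, so the handler now copies the ring slot, swaps the whole word once with le32_to_cpu(), and decodes the local copy. A sketch of the idea with an illustrative field layout (not the exact acenic.h one, and bit-field order is itself compiler/endian dependent):

#include <linux/types.h>
#include <asm/byteorder.h>

union example_event {
	u32 word;
	struct {
		u32 idx:12, code:12, evt:8;	/* illustrative only */
	} data;
};

static u32 example_event_code(volatile u32 *ring_slot)
{
	union example_event ev;

	ev.word = le32_to_cpu(*ring_slot);	/* swap once, then decode */
	return ev.data.code;
}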
@@ -1344,14 +1503,16 @@
idx = rxretcsm;
while (idx != rxretprd){
- struct sk_buff *skb, **oldskb_p;
+ struct ring_info *rip;
+ struct sk_buff *skb;
struct rx_desc *rxdesc;
+ dma_addr_t mapping;
u32 skbidx;
- int desc_type;
+ int desc_type, mapsize;
u16 csum;
- skbidx = ap->rx_return_ring[idx].idx;
- desc_type = ap->rx_return_ring[idx].flags &
+ skbidx = le16_to_cpu(ap->rx_return_ring[idx].idx);
+ desc_type = le16_to_cpu(ap->rx_return_ring[idx].flags) &
(BD_FLG_JUMBO | BD_FLG_MINI);
switch(desc_type) {
@@ -1363,42 +1524,47 @@
* atomic operations for each packet arriving.
*/
case 0:
- oldskb_p = &ap->skb->rx_std_skbuff[skbidx];
+ rip = &ap->skb->rx_std_skbuff[skbidx];
+ mapsize = ACE_STD_BUFSIZE - (2 + 16);
rxdesc = &ap->rx_std_ring[skbidx];
std_count++;
break;
case BD_FLG_JUMBO:
- oldskb_p = &ap->skb->rx_jumbo_skbuff[skbidx];
+ rip = &ap->skb->rx_jumbo_skbuff[skbidx];
+ mapsize = ACE_JUMBO_BUFSIZE - (2 + 16);
rxdesc = &ap->rx_jumbo_ring[skbidx];
atomic_dec(&ap->cur_jumbo_bufs);
break;
case BD_FLG_MINI:
- oldskb_p = &ap->skb->rx_mini_skbuff[skbidx];
+ rip = &ap->skb->rx_mini_skbuff[skbidx];
+ mapsize = ACE_MINI_BUFSIZE - (2 + 16);
rxdesc = &ap->rx_mini_ring[skbidx];
mini_count++;
break;
default:
printk(KERN_INFO "%s: unknown frame type (0x%02x) "
"returned by NIC\n", dev->name,
- ap->rx_return_ring[idx].flags);
+ le16_to_cpu(ap->rx_return_ring[idx].flags));
goto error;
}
- skb = *oldskb_p;
+ skb = rip->skb;
+ mapping = rip->mapping;
#if DEBUG
if (skb == NULL) {
printk("Mayday! illegal skb received! (idx %i)\n", skbidx);
goto error;
}
#endif
- *oldskb_p = NULL;
- skb_put(skb, rxdesc->size);
+ rip->skb = NULL;
+ pci_unmap_single(ap->pdev, mapping, mapsize);
+ skb_put(skb, le16_to_cpu(rxdesc->size));
rxdesc->size = 0;
/*
* Fly baby, fly!
*/
- csum = ap->rx_return_ring[idx].tcp_udp_csum;
+ csum = le16_to_cpu(ap->rx_return_ring[idx].tcp_udp_csum);
skb->dev = dev;
skb->protocol = eth_type_trans(skb, dev);
@@ -1476,22 +1642,30 @@
* working on the other stuff - hey we don't need a spin lock
* anymore.
*/
- rxretprd = ap->rx_ret_prd;
+ rxretprd = le32_to_cpu(*(ap->rx_ret_prd));
rxretcsm = ap->cur_rx;
if (rxretprd != rxretcsm)
ace_rx_int(dev, rxretprd, rxretcsm);
- txcsm = ap->tx_csm;
+ txcsm = le32_to_cpu(*(ap->tx_csm));
idx = ap->tx_ret_csm;
if (txcsm != idx) {
do {
+ struct sk_buff *skb;
+ dma_addr_t mapping;
+
+ skb = ap->skb->tx_skbuff[idx].skb;
+ mapping = ap->skb->tx_skbuff[idx].mapping;
+
ap->stats.tx_packets++;
- ap->stats.tx_bytes += ap->skb->tx_skbuff[idx]->len;
- dev_kfree_skb(ap->skb->tx_skbuff[idx]);
+ ap->stats.tx_bytes += skb->len;
+
+ pci_unmap_single(ap->pdev, mapping, skb->len);
+ dev_kfree_skb(skb);
- ap->skb->tx_skbuff[idx] = NULL;
+ ap->skb->tx_skbuff[idx].skb = NULL;
/*
* Question here is whether one should not skip
@@ -1514,14 +1688,14 @@
* Ie. skip the comparison of the tx producer vs. the
* consumer.
*/
- if (ap->tx_full && dev->tbusy) {
+ if (ap->tx_full &&
+ test_bit(LINK_STATE_XOFF, &dev->state)) {
ap->tx_full = 0;
/*
* This does not need to be atomic (and expensive),
* I've seen cases where it would fail otherwise ;-(
*/
- clear_bit(0, &dev->tbusy);
- mark_bh(NET_BH);
+ netif_wake_queue(dev);
/*
* TX ring is no longer full, aka the
@@ -1535,7 +1709,7 @@
}
evtcsm = readl(&regs->EvtCsm);
- evtprd = ap->evt_prd;
+ evtprd = le32_to_cpu(*(ap->evt_prd));
if (evtcsm != evtprd) {
evtcsm = ace_handle_event(dev, evtcsm, evtprd);
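
Direct dev->tbusy manipulation plus mark_bh(NET_BH) gives way to the netif_* helpers and the LINK_STATE_* bits in dev->state (the 2.3.43 names; later kernels prefix them with __). A minimal sketch of the wake-up side (hypothetical completion hook):

#include <linux/netdevice.h>

/* Wake the queue only if it was actually stopped, i.e. if
 * netif_stop_queue() set LINK_STATE_XOFF earlier. */
static void example_tx_done(struct net_device *dev, int ring_has_room)
{
	if (ring_has_room && test_bit(LINK_STATE_XOFF, &dev->state))
		netif_wake_queue(dev);	/* replaces clear_bit + mark_bh */
}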
@@ -1546,7 +1720,7 @@
* This has to go last in the interrupt handler and run with
* the spin lock released ... what lock?
*/
- if (dev->start) {
+ if (test_bit(LINK_STATE_START, &dev->state)) {
int cur_size;
int run_bh = 0;
@@ -1651,10 +1825,6 @@
ace_issue_cmd(regs, &cmd);
#endif
- dev->tbusy = 0;
- dev->interrupt = 0;
- dev->start = 1;
-
MOD_INC_USE_COUNT;
/*
@@ -1684,8 +1854,7 @@
unsigned long flags;
short i;
- dev->start = 0;
- set_bit(0, &dev->tbusy);
+ netif_stop_queue(dev);
ap = (struct ace_private *)dev->priv;
regs = ap->regs;
@@ -1713,11 +1882,17 @@
cli();
for (i = 0; i < TX_RING_ENTRIES; i++) {
- if (ap->skb->tx_skbuff[i]) {
+ struct sk_buff *skb;
+ dma_addr_t mapping;
+
+ skb = ap->skb->tx_skbuff[i].skb;
+ mapping = ap->skb->tx_skbuff[i].mapping;
+ if (skb) {
writel(0, &ap->tx_ring[i].addr.addrhi);
writel(0, &ap->tx_ring[i].addr.addrlo);
writel(0, &ap->tx_ring[i].flagsize);
- dev_kfree_skb(ap->skb->tx_skbuff[i]);
+ pci_unmap_single(ap->pdev, mapping, skb->len);
+ dev_kfree_skb(skb);
}
}
@@ -1738,9 +1913,6 @@
unsigned long addr;
u32 idx, flagsize;
- if (test_and_set_bit(0, &dev->tbusy))
- return 1;
-
idx = ap->tx_prd;
if ((idx + 1) % TX_RING_ENTRIES == ap->tx_ret_csm) {
@@ -1752,8 +1924,10 @@
return 1;
}
- ap->skb->tx_skbuff[idx] = skb;
- addr = virt_to_bus(skb->data);
+ ap->skb->tx_skbuff[idx].skb = skb;
+ ap->skb->tx_skbuff[idx].mapping =
+ pci_map_single(ap->pdev, skb->data, skb->len);
+ addr = (unsigned long) ap->skb->tx_skbuff[idx].mapping;
#if (BITS_PER_LONG == 64)
writel(addr >> 32, &ap->tx_ring[idx].addr.addrhi);
#endif
@@ -1784,7 +1958,7 @@
/*
* No need for it to be atomic - seems it needs to be
*/
- clear_bit(0, &dev->tbusy);
+ netif_stop_queue(dev);
}
dev->trans_start = jiffies;
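
On the transmit side the packet data is mapped at xmit time and the handle kept in tx_skbuff[idx].mapping; completion reuses skb->len as the unmap length, since pci_unmap_single() needs the size the mapping was created with. A hedged sketch of that pairing (hypothetical helpers, 2.3.43-era three-argument calls):

#include <linux/pci.h>
#include <linux/skbuff.h>

static dma_addr_t example_map_tx(struct pci_dev *pdev, struct sk_buff *skb)
{
	return pci_map_single(pdev, skb->data, skb->len);
}

static void example_unmap_tx(struct pci_dev *pdev, struct sk_buff *skb,
			     dma_addr_t mapping)
{
	pci_unmap_single(pdev, mapping, skb->len);	/* same length */
	dev_kfree_skb(skb);
}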
@@ -1956,7 +2130,7 @@
u16 *da;
struct cmd cmd;
- if(dev->start)
+ if(test_bit(LINK_STATE_START, &dev->state))
return -EBUSY;
memcpy(dev->dev_addr, addr->sa_data,dev->addr_len);
@@ -2038,11 +2212,11 @@
}
-void __init ace_copy(struct ace_regs *regs, void *src, u32 dest, int size)
+void __init ace_copy(struct ace_regs *regs, void *src, unsigned long dest, int size)
{
unsigned long tdest;
u32 *wsrc;
- short tsize, i;
+ unsigned long tsize, i;
if (size <= 0)
return;
@@ -2053,14 +2227,10 @@
tdest = (unsigned long)&regs->Window +
(dest & (ACE_WINDOW_SIZE - 1));
writel(dest & ~(ACE_WINDOW_SIZE - 1), &regs->WinBase);
-#ifdef __BIG_ENDIAN
-#error "data must be swapped here"
-#else
wsrc = src;
for (i = 0; i < (tsize / 4); i++){
writel(wsrc[i], tdest + i*4);
}
-#endif
dest += tsize;
src += tsize;
size -= tsize;
@@ -2070,10 +2240,10 @@
}
-void __init ace_clear(struct ace_regs *regs, u32 dest, int size)
+void __init ace_clear(struct ace_regs *regs, unsigned long dest, int size)
{
unsigned long tdest;
- short tsize = 0, i;
+ unsigned long tsize = 0, i;
if (size <= 0)
return;
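
ace_copy() and ace_clear() now take dest as unsigned long rather than u32, and the local counters widen to match; the copy itself still walks the firmware image through a fixed register window in window-sized chunks. A hedged sketch of the chunking rule (EXAMPLE_WINDOW stands in for ACE_WINDOW_SIZE):

/* Largest chunk that does not cross the next window boundary. */
#define EXAMPLE_WINDOW 2048

static int example_chunk_len(unsigned long dest, int size)
{
	int room = EXAMPLE_WINDOW - (int)(dest & (EXAMPLE_WINDOW - 1));

	return size < room ? size : room;
}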