author     David S. Miller <davem@davemloft.net>  2008-08-27 18:37:58 -0700
committer  David S. Miller <davem@davemloft.net>  2008-08-29 02:13:12 -0700
commit     7a715f46012f3552294154978aed59cba9804928 (patch)
tree       9e7079ae4ad370b63ac6c8a4face573d6cf60533 /drivers/net
parent     5778002874de0fb7e3d8c4a0a4afb6b1a6297069 (diff)
sparc: Make SBUS DMA interfaces take struct device.

This is the first step in converting all the SBUS drivers over to
generic dma_*().

Signed-off-by: David S. Miller <davem@davemloft.net>
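For reference, a minimal sketch of the calling-convention change this patch makes. It is illustrative only, reusing a mapping call from myri_sbus.c in the diff below; the final dma_*() form is an assumption about the later conversion patches the commit message refers to, not something this patch does.

	/* Before: the SBUS helpers took the bus-specific struct sbus_dev. */
	dma_addr = sbus_map_single(mp->myri_sdev, skb->data,
				   RX_ALLOC_SIZE, SBUS_DMA_FROMDEVICE);

	/* After this patch: they take the generic struct device embedded
	 * in the sbus_dev's of_device. */
	dma_addr = sbus_map_single(&mp->myri_sdev->ofdev.dev, skb->data,
				   RX_ALLOC_SIZE, SBUS_DMA_FROMDEVICE);

	/* Eventual goal (later patches, sketched here as an assumption):
	 * the same call sites can switch to the generic DMA API. */
	dma_addr = dma_map_single(&mp->myri_sdev->ofdev.dev, skb->data,
				  RX_ALLOC_SIZE, DMA_FROM_DEVICE);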
Diffstat (limited to 'drivers/net')
-rw-r--r--  drivers/net/myri_sbus.c  27
-rw-r--r--  drivers/net/sunbmac.c    28
-rw-r--r--  drivers/net/sunhme.c     36
-rw-r--r--  drivers/net/sunhme.h      1
-rw-r--r--  drivers/net/sunlance.c    5
-rw-r--r--  drivers/net/sunqe.c      12
6 files changed, 61 insertions(+), 48 deletions(-)
diff --git a/drivers/net/myri_sbus.c b/drivers/net/myri_sbus.c
index 656a260fc956..c17462159d9d 100644
--- a/drivers/net/myri_sbus.c
+++ b/drivers/net/myri_sbus.c
@@ -243,7 +243,8 @@ static void myri_clean_rings(struct myri_eth *mp)
u32 dma_addr;
dma_addr = sbus_readl(&rxd->myri_scatters[0].addr);
- sbus_unmap_single(mp->myri_sdev, dma_addr, RX_ALLOC_SIZE, SBUS_DMA_FROMDEVICE);
+ sbus_unmap_single(&mp->myri_sdev->ofdev.dev, dma_addr,
+ RX_ALLOC_SIZE, SBUS_DMA_FROMDEVICE);
dev_kfree_skb(mp->rx_skbs[i]);
mp->rx_skbs[i] = NULL;
}
@@ -259,7 +260,9 @@ static void myri_clean_rings(struct myri_eth *mp)
u32 dma_addr;
dma_addr = sbus_readl(&txd->myri_gathers[0].addr);
- sbus_unmap_single(mp->myri_sdev, dma_addr, (skb->len + 3) & ~3, SBUS_DMA_TODEVICE);
+ sbus_unmap_single(&mp->myri_sdev->ofdev.dev, dma_addr,
+ (skb->len + 3) & ~3,
+ SBUS_DMA_TODEVICE);
dev_kfree_skb(mp->tx_skbs[i]);
mp->tx_skbs[i] = NULL;
}
@@ -288,7 +291,9 @@ static void myri_init_rings(struct myri_eth *mp, int from_irq)
skb->dev = dev;
skb_put(skb, RX_ALLOC_SIZE);
- dma_addr = sbus_map_single(mp->myri_sdev, skb->data, RX_ALLOC_SIZE, SBUS_DMA_FROMDEVICE);
+ dma_addr = sbus_map_single(&mp->myri_sdev->ofdev.dev,
+ skb->data, RX_ALLOC_SIZE,
+ SBUS_DMA_FROMDEVICE);
sbus_writel(dma_addr, &rxd[i].myri_scatters[0].addr);
sbus_writel(RX_ALLOC_SIZE, &rxd[i].myri_scatters[0].len);
sbus_writel(i, &rxd[i].ctx);
@@ -344,7 +349,8 @@ static void myri_tx(struct myri_eth *mp, struct net_device *dev)
DTX(("SKB[%d] ", entry));
dma_addr = sbus_readl(&sq->myri_txd[entry].myri_gathers[0].addr);
- sbus_unmap_single(mp->myri_sdev, dma_addr, skb->len, SBUS_DMA_TODEVICE);
+ sbus_unmap_single(&mp->myri_sdev->ofdev.dev, dma_addr,
+ skb->len, SBUS_DMA_TODEVICE);
dev_kfree_skb(skb);
mp->tx_skbs[entry] = NULL;
dev->stats.tx_packets++;
@@ -423,7 +429,7 @@ static void myri_rx(struct myri_eth *mp, struct net_device *dev)
/* Check for errors. */
DRX(("rxd[%d]: %p len[%d] csum[%08x] ", entry, rxd, len, csum));
- sbus_dma_sync_single_for_cpu(mp->myri_sdev,
+ sbus_dma_sync_single_for_cpu(&mp->myri_sdev->ofdev.dev,
sbus_readl(&rxd->myri_scatters[0].addr),
RX_ALLOC_SIZE, SBUS_DMA_FROMDEVICE);
if (len < (ETH_HLEN + MYRI_PAD_LEN) || (skb->data[0] != MYRI_PAD_LEN)) {
@@ -442,7 +448,7 @@ static void myri_rx(struct myri_eth *mp, struct net_device *dev)
drops++;
DRX(("DROP "));
dev->stats.rx_dropped++;
- sbus_dma_sync_single_for_device(mp->myri_sdev,
+ sbus_dma_sync_single_for_device(&mp->myri_sdev->ofdev.dev,
sbus_readl(&rxd->myri_scatters[0].addr),
RX_ALLOC_SIZE,
SBUS_DMA_FROMDEVICE);
@@ -464,14 +470,14 @@ static void myri_rx(struct myri_eth *mp, struct net_device *dev)
DRX(("skb_alloc(FAILED) "));
goto drop_it;
}
- sbus_unmap_single(mp->myri_sdev,
+ sbus_unmap_single(&mp->myri_sdev->ofdev.dev,
sbus_readl(&rxd->myri_scatters[0].addr),
RX_ALLOC_SIZE,
SBUS_DMA_FROMDEVICE);
mp->rx_skbs[index] = new_skb;
new_skb->dev = dev;
skb_put(new_skb, RX_ALLOC_SIZE);
- dma_addr = sbus_map_single(mp->myri_sdev,
+ dma_addr = sbus_map_single(&mp->myri_sdev->ofdev.dev,
new_skb->data,
RX_ALLOC_SIZE,
SBUS_DMA_FROMDEVICE);
@@ -500,7 +506,7 @@ static void myri_rx(struct myri_eth *mp, struct net_device *dev)
/* Reuse original ring buffer. */
DRX(("reuse "));
- sbus_dma_sync_single_for_device(mp->myri_sdev,
+ sbus_dma_sync_single_for_device(&mp->myri_sdev->ofdev.dev,
sbus_readl(&rxd->myri_scatters[0].addr),
RX_ALLOC_SIZE,
SBUS_DMA_FROMDEVICE);
@@ -652,7 +658,8 @@ static int myri_start_xmit(struct sk_buff *skb, struct net_device *dev)
sbus_writew((skb->data[4] << 8) | skb->data[5], &txd->addr[3]);
}
- dma_addr = sbus_map_single(mp->myri_sdev, skb->data, len, SBUS_DMA_TODEVICE);
+ dma_addr = sbus_map_single(&mp->myri_sdev->ofdev.dev, skb->data,
+ len, SBUS_DMA_TODEVICE);
sbus_writel(dma_addr, &txd->myri_gathers[0].addr);
sbus_writel(len, &txd->myri_gathers[0].len);
sbus_writel(1, &txd->num_sg);
diff --git a/drivers/net/sunbmac.c b/drivers/net/sunbmac.c
index 0e4a88d16327..b92218c2f76c 100644
--- a/drivers/net/sunbmac.c
+++ b/drivers/net/sunbmac.c
@@ -239,7 +239,7 @@ static void bigmac_init_rings(struct bigmac *bp, int from_irq)
skb_reserve(skb, 34);
bb->be_rxd[i].rx_addr =
- sbus_map_single(bp->bigmac_sdev, skb->data,
+ sbus_map_single(&bp->bigmac_sdev->ofdev.dev, skb->data,
RX_BUF_ALLOC_SIZE - 34,
SBUS_DMA_FROMDEVICE);
bb->be_rxd[i].rx_flags =
@@ -776,7 +776,7 @@ static void bigmac_tx(struct bigmac *bp)
skb = bp->tx_skbs[elem];
bp->enet_stats.tx_packets++;
bp->enet_stats.tx_bytes += skb->len;
- sbus_unmap_single(bp->bigmac_sdev,
+ sbus_unmap_single(&bp->bigmac_sdev->ofdev.dev,
this->tx_addr, skb->len,
SBUS_DMA_TODEVICE);
@@ -831,7 +831,7 @@ static void bigmac_rx(struct bigmac *bp)
drops++;
goto drop_it;
}
- sbus_unmap_single(bp->bigmac_sdev,
+ sbus_unmap_single(&bp->bigmac_sdev->ofdev.dev,
this->rx_addr,
RX_BUF_ALLOC_SIZE - 34,
SBUS_DMA_FROMDEVICE);
@@ -839,10 +839,11 @@ static void bigmac_rx(struct bigmac *bp)
new_skb->dev = bp->dev;
skb_put(new_skb, ETH_FRAME_LEN);
skb_reserve(new_skb, 34);
- this->rx_addr = sbus_map_single(bp->bigmac_sdev,
- new_skb->data,
- RX_BUF_ALLOC_SIZE - 34,
- SBUS_DMA_FROMDEVICE);
+ this->rx_addr =
+ sbus_map_single(&bp->bigmac_sdev->ofdev.dev,
+ new_skb->data,
+ RX_BUF_ALLOC_SIZE - 34,
+ SBUS_DMA_FROMDEVICE);
this->rx_flags =
(RXD_OWN | ((RX_BUF_ALLOC_SIZE - 34) & RXD_LENGTH));
@@ -857,11 +858,11 @@ static void bigmac_rx(struct bigmac *bp)
}
skb_reserve(copy_skb, 2);
skb_put(copy_skb, len);
- sbus_dma_sync_single_for_cpu(bp->bigmac_sdev,
+ sbus_dma_sync_single_for_cpu(&bp->bigmac_sdev->ofdev.dev,
this->rx_addr, len,
SBUS_DMA_FROMDEVICE);
skb_copy_to_linear_data(copy_skb, (unsigned char *)skb->data, len);
- sbus_dma_sync_single_for_device(bp->bigmac_sdev,
+ sbus_dma_sync_single_for_device(&bp->bigmac_sdev->ofdev.dev,
this->rx_addr, len,
SBUS_DMA_FROMDEVICE);
@@ -959,7 +960,8 @@ static int bigmac_start_xmit(struct sk_buff *skb, struct net_device *dev)
u32 mapping;
len = skb->len;
- mapping = sbus_map_single(bp->bigmac_sdev, skb->data, len, SBUS_DMA_TODEVICE);
+ mapping = sbus_map_single(&bp->bigmac_sdev->ofdev.dev, skb->data,
+ len, SBUS_DMA_TODEVICE);
/* Avoid a race... */
spin_lock_irq(&bp->lock);
@@ -1183,7 +1185,7 @@ static int __devinit bigmac_ether_init(struct sbus_dev *qec_sdev)
bigmac_stop(bp);
/* Allocate transmit/receive descriptor DVMA block. */
- bp->bmac_block = sbus_alloc_consistent(bp->bigmac_sdev,
+ bp->bmac_block = sbus_alloc_consistent(&bp->bigmac_sdev->ofdev.dev,
PAGE_SIZE,
&bp->bblock_dvma);
if (bp->bmac_block == NULL || bp->bblock_dvma == 0) {
@@ -1245,7 +1247,7 @@ fail_and_cleanup:
sbus_iounmap(bp->tregs, TCVR_REG_SIZE);
if (bp->bmac_block)
- sbus_free_consistent(bp->bigmac_sdev,
+ sbus_free_consistent(&bp->bigmac_sdev->ofdev.dev,
PAGE_SIZE,
bp->bmac_block,
bp->bblock_dvma);
@@ -1280,7 +1282,7 @@ static int __devexit bigmac_sbus_remove(struct of_device *dev)
sbus_iounmap(bp->creg, CREG_REG_SIZE);
sbus_iounmap(bp->bregs, BMAC_REG_SIZE);
sbus_iounmap(bp->tregs, TCVR_REG_SIZE);
- sbus_free_consistent(bp->bigmac_sdev,
+ sbus_free_consistent(&bp->bigmac_sdev->ofdev.dev,
PAGE_SIZE,
bp->bmac_block,
bp->bblock_dvma);
diff --git a/drivers/net/sunhme.c b/drivers/net/sunhme.c
index b79d5f018f79..cd93fc5e826a 100644
--- a/drivers/net/sunhme.c
+++ b/drivers/net/sunhme.c
@@ -251,13 +251,13 @@ static u32 pci_hme_read_desc32(hme32 *p)
#define hme_read_desc32(__hp, __p) \
((__hp)->read_desc32(__p))
#define hme_dma_map(__hp, __ptr, __size, __dir) \
- ((__hp)->dma_map((__hp)->happy_dev, (__ptr), (__size), (__dir)))
+ ((__hp)->dma_map((__hp)->dma_dev, (__ptr), (__size), (__dir)))
#define hme_dma_unmap(__hp, __addr, __size, __dir) \
- ((__hp)->dma_unmap((__hp)->happy_dev, (__addr), (__size), (__dir)))
+ ((__hp)->dma_unmap((__hp)->dma_dev, (__addr), (__size), (__dir)))
#define hme_dma_sync_for_cpu(__hp, __addr, __size, __dir) \
- ((__hp)->dma_sync_for_cpu((__hp)->happy_dev, (__addr), (__size), (__dir)))
+ ((__hp)->dma_sync_for_cpu((__hp)->dma_dev, (__addr), (__size), (__dir)))
#define hme_dma_sync_for_device(__hp, __addr, __size, __dir) \
- ((__hp)->dma_sync_for_device((__hp)->happy_dev, (__addr), (__size), (__dir)))
+ ((__hp)->dma_sync_for_device((__hp)->dma_dev, (__addr), (__size), (__dir)))
#else
#ifdef CONFIG_SBUS
/* SBUS only compilation */
@@ -277,13 +277,13 @@ do { (__txd)->tx_addr = (__force hme32)(u32)(__addr); \
} while(0)
#define hme_read_desc32(__hp, __p) ((__force u32)(hme32)*(__p))
#define hme_dma_map(__hp, __ptr, __size, __dir) \
- sbus_map_single((__hp)->happy_dev, (__ptr), (__size), (__dir))
+ sbus_map_single((__hp)->dma_dev, (__ptr), (__size), (__dir))
#define hme_dma_unmap(__hp, __addr, __size, __dir) \
- sbus_unmap_single((__hp)->happy_dev, (__addr), (__size), (__dir))
+ sbus_unmap_single((__hp)->dma_dev, (__addr), (__size), (__dir))
#define hme_dma_sync_for_cpu(__hp, __addr, __size, __dir) \
- sbus_dma_sync_single_for_cpu((__hp)->happy_dev, (__addr), (__size), (__dir))
+ sbus_dma_sync_single_for_cpu((__hp)->dma_dev, (__addr), (__size), (__dir))
#define hme_dma_sync_for_device(__hp, __addr, __size, __dir) \
- sbus_dma_sync_single_for_device((__hp)->happy_dev, (__addr), (__size), (__dir))
+ sbus_dma_sync_single_for_device((__hp)->dma_dev, (__addr), (__size), (__dir))
#else
/* PCI only compilation */
#define hme_write32(__hp, __reg, __val) \
@@ -305,13 +305,13 @@ static inline u32 hme_read_desc32(struct happy_meal *hp, hme32 *p)
return le32_to_cpup((__le32 *)p);
}
#define hme_dma_map(__hp, __ptr, __size, __dir) \
- pci_map_single((__hp)->happy_dev, (__ptr), (__size), (__dir))
+ pci_map_single((__hp)->dma_dev, (__ptr), (__size), (__dir))
#define hme_dma_unmap(__hp, __addr, __size, __dir) \
- pci_unmap_single((__hp)->happy_dev, (__addr), (__size), (__dir))
+ pci_unmap_single((__hp)->dma_dev, (__addr), (__size), (__dir))
#define hme_dma_sync_for_cpu(__hp, __addr, __size, __dir) \
- pci_dma_sync_single_for_cpu((__hp)->happy_dev, (__addr), (__size), (__dir))
+ pci_dma_sync_single_for_cpu((__hp)->dma_dev, (__addr), (__size), (__dir))
#define hme_dma_sync_for_device(__hp, __addr, __size, __dir) \
- pci_dma_sync_single_for_device((__hp)->happy_dev, (__addr), (__size), (__dir))
+ pci_dma_sync_single_for_device((__hp)->dma_dev, (__addr), (__size), (__dir))
#endif
#endif
@@ -2716,6 +2716,7 @@ static int __devinit happy_meal_sbus_probe_one(struct sbus_dev *sdev, int is_qfe
hp = dev->priv;
hp->happy_dev = sdev;
+ hp->dma_dev = &sdev->ofdev.dev;
spin_lock_init(&hp->happy_lock);
@@ -2785,7 +2786,7 @@ static int __devinit happy_meal_sbus_probe_one(struct sbus_dev *sdev, int is_qfe
hp->happy_bursts = of_getintprop_default(sdev->bus->ofdev.node,
"burst-sizes", 0x00);
- hp->happy_block = sbus_alloc_consistent(hp->happy_dev,
+ hp->happy_block = sbus_alloc_consistent(hp->dma_dev,
PAGE_SIZE,
&hp->hblock_dvma);
err = -ENOMEM;
@@ -2860,7 +2861,7 @@ static int __devinit happy_meal_sbus_probe_one(struct sbus_dev *sdev, int is_qfe
return 0;
err_out_free_consistent:
- sbus_free_consistent(hp->happy_dev,
+ sbus_free_consistent(hp->dma_dev,
PAGE_SIZE,
hp->happy_block,
hp->hblock_dvma);
@@ -3035,6 +3036,7 @@ static int __devinit happy_meal_pci_probe(struct pci_dev *pdev,
memset(hp, 0, sizeof(*hp));
hp->happy_dev = pdev;
+ hp->dma_dev = pdev;
spin_lock_init(&hp->happy_lock);
@@ -3231,12 +3233,12 @@ static void __devexit happy_meal_pci_remove(struct pci_dev *pdev)
unregister_netdev(net_dev);
- pci_free_consistent(hp->happy_dev,
+ pci_free_consistent(hp->dma_dev,
PAGE_SIZE,
hp->happy_block,
hp->hblock_dvma);
iounmap(hp->gregs);
- pci_release_regions(hp->happy_dev);
+ pci_release_regions(hp->dma_dev);
free_netdev(net_dev);
@@ -3306,7 +3308,7 @@ static int __devexit hme_sbus_remove(struct of_device *dev)
sbus_iounmap(hp->erxregs, ERX_REG_SIZE);
sbus_iounmap(hp->bigmacregs, BMAC_REG_SIZE);
sbus_iounmap(hp->tcvregs, TCVR_REG_SIZE);
- sbus_free_consistent(hp->happy_dev,
+ sbus_free_consistent(hp->dma_dev,
PAGE_SIZE,
hp->happy_block,
hp->hblock_dvma);
diff --git a/drivers/net/sunhme.h b/drivers/net/sunhme.h
index 4da5539fac7b..756e96e5ef4b 100644
--- a/drivers/net/sunhme.h
+++ b/drivers/net/sunhme.h
@@ -413,6 +413,7 @@ struct happy_meal {
/* This is either a sbus_dev or a pci_dev. */
void *happy_dev;
+ void *dma_dev;
spinlock_t happy_lock;
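sunhme is the only driver here that builds for both SBUS and PCI, so rather than rewriting its hme_dma_* wrappers it gains a second handle, dma_dev, alongside happy_dev. A brief recap of how the two probe paths in the sunhme.c hunks above populate it (illustrative summary, not part of the diff itself):

	/* SBUS probe: register access keeps the sbus_dev; DMA calls get
	 * the generic struct device. */
	hp->happy_dev = sdev;
	hp->dma_dev   = &sdev->ofdev.dev;

	/* PCI probe: the pci_* DMA wrappers still take a pci_dev, so
	 * both handles point at it for now. */
	hp->happy_dev = pdev;
	hp->dma_dev   = pdev;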
diff --git a/drivers/net/sunlance.c b/drivers/net/sunlance.c
index 24ffecb1ce23..4f4baf9f4ec8 100644
--- a/drivers/net/sunlance.c
+++ b/drivers/net/sunlance.c
@@ -1283,7 +1283,7 @@ static void lance_free_hwresources(struct lance_private *lp)
sbus_iounmap(lp->init_block_iomem,
sizeof(struct lance_init_block));
} else if (lp->init_block_mem) {
- sbus_free_consistent(lp->sdev,
+ sbus_free_consistent(&lp->sdev->ofdev.dev,
sizeof(struct lance_init_block),
lp->init_block_mem,
lp->init_block_dvma);
@@ -1384,7 +1384,8 @@ static int __devinit sparc_lance_probe_one(struct sbus_dev *sdev,
lp->tx = lance_tx_pio;
} else {
lp->init_block_mem =
- sbus_alloc_consistent(sdev, sizeof(struct lance_init_block),
+ sbus_alloc_consistent(&sdev->ofdev.dev,
+ sizeof(struct lance_init_block),
&lp->init_block_dvma);
if (!lp->init_block_mem || lp->init_block_dvma == 0) {
printk(KERN_ERR "SunLance: Cannot allocate consistent DMA memory.\n");
diff --git a/drivers/net/sunqe.c b/drivers/net/sunqe.c
index e811331d4608..ac8049cab247 100644
--- a/drivers/net/sunqe.c
+++ b/drivers/net/sunqe.c
@@ -879,10 +879,10 @@ static int __devinit qec_ether_init(struct sbus_dev *sdev)
goto fail;
}
- qe->qe_block = sbus_alloc_consistent(qe->qe_sdev,
+ qe->qe_block = sbus_alloc_consistent(&qe->qe_sdev->ofdev.dev,
PAGE_SIZE,
&qe->qblock_dvma);
- qe->buffers = sbus_alloc_consistent(qe->qe_sdev,
+ qe->buffers = sbus_alloc_consistent(&qe->qe_sdev->ofdev.dev,
sizeof(struct sunqe_buffers),
&qe->buffers_dvma);
if (qe->qe_block == NULL || qe->qblock_dvma == 0 ||
@@ -926,12 +926,12 @@ fail:
if (qe->mregs)
sbus_iounmap(qe->mregs, MREGS_REG_SIZE);
if (qe->qe_block)
- sbus_free_consistent(qe->qe_sdev,
+ sbus_free_consistent(&qe->qe_sdev->ofdev.dev,
PAGE_SIZE,
qe->qe_block,
qe->qblock_dvma);
if (qe->buffers)
- sbus_free_consistent(qe->qe_sdev,
+ sbus_free_consistent(&qe->qe_sdev->ofdev.dev,
sizeof(struct sunqe_buffers),
qe->buffers,
qe->buffers_dvma);
@@ -957,11 +957,11 @@ static int __devexit qec_sbus_remove(struct of_device *dev)
sbus_iounmap(qp->qcregs, CREG_REG_SIZE);
sbus_iounmap(qp->mregs, MREGS_REG_SIZE);
- sbus_free_consistent(qp->qe_sdev,
+ sbus_free_consistent(&qp->qe_sdev->ofdev.dev,
PAGE_SIZE,
qp->qe_block,
qp->qblock_dvma);
- sbus_free_consistent(qp->qe_sdev,
+ sbus_free_consistent(&qp->qe_sdev->ofdev.dev,
sizeof(struct sunqe_buffers),
qp->buffers,
qp->buffers_dvma);