-rw-r--r--  drivers/infiniband/hw/amso1100/c2_alloc.c   13
-rw-r--r--  drivers/infiniband/hw/amso1100/c2_cq.c      18
-rw-r--r--  drivers/infiniband/hw/amso1100/c2_rnic.c    52
3 files changed, 33 insertions, 50 deletions
diff --git a/drivers/infiniband/hw/amso1100/c2_alloc.c b/drivers/infiniband/hw/amso1100/c2_alloc.c
index 028a60bbfca9..0315f99e4191 100644
--- a/drivers/infiniband/hw/amso1100/c2_alloc.c
+++ b/drivers/infiniband/hw/amso1100/c2_alloc.c
@@ -42,13 +42,14 @@ static int c2_alloc_mqsp_chunk(struct c2_dev *c2dev, gfp_t gfp_mask,
{
int i;
struct sp_chunk *new_head;
+ dma_addr_t dma_addr;
- new_head = (struct sp_chunk *) __get_free_page(gfp_mask);
+ new_head = dma_alloc_coherent(&c2dev->pcidev->dev, PAGE_SIZE,
+ &dma_addr, gfp_mask);
if (new_head == NULL)
return -ENOMEM;
- new_head->dma_addr = dma_map_single(c2dev->ibdev.dma_device, new_head,
- PAGE_SIZE, DMA_FROM_DEVICE);
+ new_head->dma_addr = dma_addr;
pci_unmap_addr_set(new_head, mapping, new_head->dma_addr);
new_head->next = NULL;
@@ -80,10 +81,8 @@ void c2_free_mqsp_pool(struct c2_dev *c2dev, struct sp_chunk *root)
while (root) {
next = root->next;
- dma_unmap_single(c2dev->ibdev.dma_device,
- pci_unmap_addr(root, mapping), PAGE_SIZE,
- DMA_FROM_DEVICE);
- __free_page((struct page *) root);
+ dma_free_coherent(&c2dev->pcidev->dev, PAGE_SIZE, root,
+ pci_unmap_addr(root, mapping));
root = next;
}
}
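
The c2_alloc.c hunks above replace a __get_free_page() + dma_map_single() pair with a single dma_alloc_coherent() call, so the chunk's bus address comes straight from the allocator and no streaming mapping has to be torn down later. A minimal sketch of that pattern follows; struct demo_chunk and the demo_* helpers are hypothetical stand-ins, not the driver's own types.

#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/mm.h>

/* Hypothetical stand-in for struct sp_chunk: the coherent page holds both
 * this bookkeeping header and the shared-pointer slots that follow it. */
struct demo_chunk {
	struct demo_chunk *next;
	dma_addr_t dma_addr;		/* bus address of this very page */
};

static struct demo_chunk *demo_alloc_chunk(struct device *dev, gfp_t gfp)
{
	dma_addr_t dma_addr;
	struct demo_chunk *chunk;

	/* One PAGE_SIZE coherent buffer replaces __get_free_page() plus
	 * dma_map_single(); the device-visible address comes back directly. */
	chunk = dma_alloc_coherent(dev, PAGE_SIZE, &dma_addr, gfp);
	if (!chunk)
		return NULL;

	chunk->dma_addr = dma_addr;
	chunk->next = NULL;
	return chunk;
}

static void demo_free_chunk(struct device *dev, struct demo_chunk *chunk)
{
	/* dma_free_coherent() wants the same size, CPU pointer and bus
	 * address that the allocation returned. */
	dma_free_coherent(dev, PAGE_SIZE, chunk, chunk->dma_addr);
}
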
diff --git a/drivers/infiniband/hw/amso1100/c2_cq.c b/drivers/infiniband/hw/amso1100/c2_cq.c
index 9d7bcc5ade93..05c9154d46f4 100644
--- a/drivers/infiniband/hw/amso1100/c2_cq.c
+++ b/drivers/infiniband/hw/amso1100/c2_cq.c
@@ -246,20 +246,17 @@ int c2_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify notify)
static void c2_free_cq_buf(struct c2_dev *c2dev, struct c2_mq *mq)
{
-
- dma_unmap_single(c2dev->ibdev.dma_device, pci_unmap_addr(mq, mapping),
- mq->q_size * mq->msg_size, DMA_FROM_DEVICE);
- free_pages((unsigned long) mq->msg_pool.host,
- get_order(mq->q_size * mq->msg_size));
+ dma_free_coherent(&c2dev->pcidev->dev, mq->q_size * mq->msg_size,
+ mq->msg_pool.host, pci_unmap_addr(mq, mapping));
}
static int c2_alloc_cq_buf(struct c2_dev *c2dev, struct c2_mq *mq, int q_size,
int msg_size)
{
- unsigned long pool_start;
+ u8 *pool_start;
- pool_start = __get_free_pages(GFP_KERNEL,
- get_order(q_size * msg_size));
+ pool_start = dma_alloc_coherent(&c2dev->pcidev->dev, q_size * msg_size,
+ &mq->host_dma, GFP_KERNEL);
if (!pool_start)
return -ENOMEM;
@@ -267,13 +264,10 @@ static int c2_alloc_cq_buf(struct c2_dev *c2dev, struct c2_mq *mq, int q_size,
0, /* index (currently unknown) */
q_size,
msg_size,
- (u8 *) pool_start,
+ pool_start,
NULL, /* peer (currently unknown) */
C2_MQ_HOST_TARGET);
- mq->host_dma = dma_map_single(c2dev->ibdev.dma_device,
- (void *)pool_start,
- q_size * msg_size, DMA_FROM_DEVICE);
pci_unmap_addr_set(mq, mapping, mq->host_dma);
return 0;
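
The c2_cq.c change applies the same idea to the CQ message pool: one coherent allocation of q_size * msg_size bytes replaces __get_free_pages() plus dma_map_single(), and the matching free hands back the identical size and DMA handle. A rough sketch, with a hypothetical demo_mq structure standing in for struct c2_mq:

#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/types.h>

/* Hypothetical cut-down message queue; only the fields the allocation
 * touches are shown (the real struct c2_mq carries much more state). */
struct demo_mq {
	u8 *host;			/* kernel virtual address of the pool */
	dma_addr_t host_dma;		/* matching bus address */
	int q_size;
	int msg_size;
};

static int demo_alloc_mq_buf(struct device *dev, struct demo_mq *mq,
			     int q_size, int msg_size)
{
	/* A single coherent allocation replaces __get_free_pages() followed
	 * by dma_map_single(); no get_order() rounding is needed because the
	 * DMA API takes a byte count. */
	mq->host = dma_alloc_coherent(dev, q_size * msg_size,
				      &mq->host_dma, GFP_KERNEL);
	if (!mq->host)
		return -ENOMEM;

	mq->q_size = q_size;
	mq->msg_size = msg_size;
	return 0;
}

static void demo_free_mq_buf(struct device *dev, struct demo_mq *mq)
{
	/* Size, CPU pointer and bus address must match the allocation. */
	dma_free_coherent(dev, mq->q_size * mq->msg_size,
			  mq->host, mq->host_dma);
}
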
diff --git a/drivers/infiniband/hw/amso1100/c2_rnic.c b/drivers/infiniband/hw/amso1100/c2_rnic.c
index 30409e179606..030238d335ed 100644
--- a/drivers/infiniband/hw/amso1100/c2_rnic.c
+++ b/drivers/infiniband/hw/amso1100/c2_rnic.c
@@ -517,14 +517,12 @@ int c2_rnic_init(struct c2_dev *c2dev)
/* Initialize the Verbs Reply Queue */
qsize = be32_to_cpu(readl(mmio_regs + C2_REGS_Q1_QSIZE));
msgsize = be32_to_cpu(readl(mmio_regs + C2_REGS_Q1_MSGSIZE));
- q1_pages = kmalloc(qsize * msgsize, GFP_KERNEL);
+ q1_pages = dma_alloc_coherent(&c2dev->pcidev->dev, qsize * msgsize,
+ &c2dev->rep_vq.host_dma, GFP_KERNEL);
if (!q1_pages) {
err = -ENOMEM;
goto bail1;
}
- c2dev->rep_vq.host_dma = dma_map_single(c2dev->ibdev.dma_device,
- (void *)q1_pages, qsize * msgsize,
- DMA_FROM_DEVICE);
pci_unmap_addr_set(&c2dev->rep_vq, mapping, c2dev->rep_vq.host_dma);
pr_debug("%s rep_vq va %p dma %llx\n", __FUNCTION__, q1_pages,
(unsigned long long) c2dev->rep_vq.host_dma);
@@ -540,14 +538,12 @@ int c2_rnic_init(struct c2_dev *c2dev)
/* Initialize the Asynchronus Event Queue */
qsize = be32_to_cpu(readl(mmio_regs + C2_REGS_Q2_QSIZE));
msgsize = be32_to_cpu(readl(mmio_regs + C2_REGS_Q2_MSGSIZE));
- q2_pages = kmalloc(qsize * msgsize, GFP_KERNEL);
+ q2_pages = dma_alloc_coherent(&c2dev->pcidev->dev, qsize * msgsize,
+ &c2dev->aeq.host_dma, GFP_KERNEL);
if (!q2_pages) {
err = -ENOMEM;
goto bail2;
}
- c2dev->aeq.host_dma = dma_map_single(c2dev->ibdev.dma_device,
- (void *)q2_pages, qsize * msgsize,
- DMA_FROM_DEVICE);
pci_unmap_addr_set(&c2dev->aeq, mapping, c2dev->aeq.host_dma);
pr_debug("%s aeq va %p dma %llx\n", __FUNCTION__, q1_pages,
(unsigned long long) c2dev->rep_vq.host_dma);
@@ -597,17 +593,13 @@ int c2_rnic_init(struct c2_dev *c2dev)
bail4:
vq_term(c2dev);
bail3:
- dma_unmap_single(c2dev->ibdev.dma_device,
- pci_unmap_addr(&c2dev->aeq, mapping),
- c2dev->aeq.q_size * c2dev->aeq.msg_size,
- DMA_FROM_DEVICE);
- kfree(q2_pages);
+ dma_free_coherent(&c2dev->pcidev->dev,
+ c2dev->aeq.q_size * c2dev->aeq.msg_size,
+ q2_pages, pci_unmap_addr(&c2dev->aeq, mapping));
bail2:
- dma_unmap_single(c2dev->ibdev.dma_device,
- pci_unmap_addr(&c2dev->rep_vq, mapping),
- c2dev->rep_vq.q_size * c2dev->rep_vq.msg_size,
- DMA_FROM_DEVICE);
- kfree(q1_pages);
+ dma_free_coherent(&c2dev->pcidev->dev,
+ c2dev->rep_vq.q_size * c2dev->rep_vq.msg_size,
+ q1_pages, pci_unmap_addr(&c2dev->rep_vq, mapping));
bail1:
c2_free_mqsp_pool(c2dev, c2dev->kern_mqsp_pool);
bail0:
@@ -640,19 +632,17 @@ void c2_rnic_term(struct c2_dev *c2dev)
/* Free the verbs request allocator */
vq_term(c2dev);
- /* Unmap and free the asynchronus event queue */
- dma_unmap_single(c2dev->ibdev.dma_device,
- pci_unmap_addr(&c2dev->aeq, mapping),
- c2dev->aeq.q_size * c2dev->aeq.msg_size,
- DMA_FROM_DEVICE);
- kfree(c2dev->aeq.msg_pool.host);
-
- /* Unmap and free the verbs reply queue */
- dma_unmap_single(c2dev->ibdev.dma_device,
- pci_unmap_addr(&c2dev->rep_vq, mapping),
- c2dev->rep_vq.q_size * c2dev->rep_vq.msg_size,
- DMA_FROM_DEVICE);
- kfree(c2dev->rep_vq.msg_pool.host);
+ /* Free the asynchronus event queue */
+ dma_free_coherent(&c2dev->pcidev->dev,
+ c2dev->aeq.q_size * c2dev->aeq.msg_size,
+ c2dev->aeq.msg_pool.host,
+ pci_unmap_addr(&c2dev->aeq, mapping));
+
+ /* Free the verbs reply queue */
+ dma_free_coherent(&c2dev->pcidev->dev,
+ c2dev->rep_vq.q_size * c2dev->rep_vq.msg_size,
+ c2dev->rep_vq.msg_pool.host,
+ pci_unmap_addr(&c2dev->rep_vq, mapping));
/* Free the MQ shared pointer pool */
c2_free_mqsp_pool(c2dev, c2dev->kern_mqsp_pool);
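
In c2_rnic.c the verbs reply queue and asynchronous event queue move to the same coherent-allocation scheme, and the bail label error path now unwinds with dma_free_coherent() instead of dma_unmap_single() + kfree(). The sketch below (demo_init_queues is hypothetical, not the driver's function) illustrates that stacked-label unwind under the new API:

#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/gfp.h>

/* Hypothetical two-queue setup mirroring the bail-label unwind in
 * c2_rnic_init(): each label undoes exactly the allocations made before
 * the corresponding failure point, in reverse order. */
static int demo_init_queues(struct device *dev, size_t q1_bytes, size_t q2_bytes)
{
	void *q1, *q2;
	dma_addr_t q1_dma, q2_dma;
	int err;

	q1 = dma_alloc_coherent(dev, q1_bytes, &q1_dma, GFP_KERNEL);
	if (!q1)
		return -ENOMEM;

	q2 = dma_alloc_coherent(dev, q2_bytes, &q2_dma, GFP_KERNEL);
	if (!q2) {
		err = -ENOMEM;
		goto bail1;
	}

	/* On success both queues stay allocated for the device's lifetime;
	 * later setup steps would stack further bail2, bail3, ... labels. */
	return 0;

bail1:
	dma_free_coherent(dev, q1_bytes, q1, q1_dma);
	return err;
}
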