[PATCH] PPC64 replace last usage of vio dma mapping routines

Post by Paul Mackerras » Sat, 05 Feb 2005 19:30:20


This patch is from Stephen Rothwell < XXXX@XXXXX.COM >.

This patch replaces the last usage of the vio dma mapping routines (in the
ibmveth driver) with the equivalent generic dma mapping routines.

Signed-off-by: Stephen Rothwell < XXXX@XXXXX.COM >
Signed-off-by: Paul Mackerras < XXXX@XXXXX.COM >
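
The conversion is mechanical: each vio_map_single()/vio_unmap_single() call
taking the struct vio_dev becomes the corresponding generic
dma_map_single()/dma_unmap_single() call on the vio_dev's embedded struct
device. A minimal before/after sketch of the pattern the diff applies
(vdev, buf and len are illustrative names, not taken from the driver):

	/* before: vio-specific wrappers keyed on the struct vio_dev */
	dma_addr = vio_map_single(vdev, buf, len, DMA_FROM_DEVICE);
	vio_unmap_single(vdev, dma_addr, len, DMA_FROM_DEVICE);

	/* after: generic DMA API keyed on the embedded struct device */
	dma_addr = dma_map_single(&vdev->dev, buf, len, DMA_FROM_DEVICE);
	dma_unmap_single(&vdev->dev, dma_addr, len, DMA_FROM_DEVICE);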

diff -ruNp linus-bk/drivers/net/ibmveth.c linus-bk-vio.1/drivers/net/ibmveth.c
--- linus-bk/drivers/net/ibmveth.c 2004-12-08 04:06:06.000000000 +1100
+++ linus-bk-vio.1/drivers/net/ibmveth.c 2005-01-31 16:45:28.000000000 +1100
@@ -218,7 +218,8 @@ static void ibmveth_replenish_buffer_poo
ibmveth_assert(index != IBM_VETH_INVALID_MAP);
ibmveth_assert(pool->skbuff[index] == NULL);

- dma_addr = vio_map_single(adapter->vdev, skb->data, pool->buff_size, DMA_FROM_DEVICE);
+ dma_addr = dma_map_single(&adapter->vdev->dev, skb->data,
+ pool->buff_size, DMA_FROM_DEVICE);

pool->free_map[free_index] = IBM_VETH_INVALID_MAP;
pool->dma_addr[index] = dma_addr;
@@ -238,7 +239,9 @@ static void ibmveth_replenish_buffer_poo
pool->free_map[free_index] = IBM_VETH_INVALID_MAP;
pool->skbuff[index] = NULL;
pool->consumer_index--;
- vio_unmap_single(adapter->vdev, pool->dma_addr[index], pool->buff_size, DMA_FROM_DEVICE);
+ dma_unmap_single(&adapter->vdev->dev,
+ pool->dma_addr[index], pool->buff_size,
+ DMA_FROM_DEVICE);
dev_kfree_skb_any(skb);
adapter->replenish_add_buff_failure++;
break;
@@ -299,7 +302,7 @@ static void ibmveth_free_buffer_pool(str
for(i = 0; i < pool->size; ++i) {
struct sk_buff *skb = pool->skbuff[i];
if(skb) {
- vio_unmap_single(adapter->vdev,
+ dma_unmap_single(&adapter->vdev->dev,
pool->dma_addr[i],
pool->buff_size,
DMA_FROM_DEVICE);
@@ -337,7 +340,7 @@ static void ibmveth_remove_buffer_from_p

adapter->rx_buff_pool[pool].skbuff[index] = NULL;

- vio_unmap_single(adapter->vdev,
+ dma_unmap_single(&adapter->vdev->dev,
adapter->rx_buff_pool[pool].dma_addr[index],
adapter->rx_buff_pool[pool].buff_size,
DMA_FROM_DEVICE);
@@ -408,7 +411,9 @@ static void ibmveth_cleanup(struct ibmve
{
if(adapter->buffer_list_addr != NULL) {
if(!dma_mapping_error(adapter->buffer_list_dma)) {
- vio_unmap_single(adapter->vdev, adapter->buffer_list_dma, 4096, DMA_BIDIRECTIONAL);
+ dma_unmap_single(&adapter->vdev->dev,
+ adapter->buffer_list_dma, 4096,
+ DMA_BIDIRECTIONAL);
adapter->buffer_list_dma = DMA_ERROR_CODE;
}
free_page((unsigned long)adapter->buffer_list_addr);
@@ -417,7 +422,9 @@ static void ibmveth_cleanup(struct ibmve

if(adapter->filter_list_addr != NULL) {
if(!dma_mapping_error(adapter->filter_list_dma)) {
- vio_unmap_single(adapter->vdev, adapter->filter_list_dma, 4096, DMA_BIDIRECTIONAL);
+ dma_unmap_single(&adapter->vdev->dev,
+ adapter->filter_list_dma, 4096,
+ DMA_BIDIRECTIONAL);
adapter->filter_list_dma = DMA_ERROR_CODE;
}
free_page((unsigned long)adapter->filter_list_addr);
@@ -426,7 +433,10 @@ static void ibmveth_cleanup(struct ibmve

if(adapter->rx_queue.queue_addr != NULL) {
if(!dma_mapping_error(adapter->rx_queue.queue_dma)) {
- vio_unmap_single(adapter->vdev, adapter->rx_queue.queue_dma, adapter->rx_queue.queue_len, DMA_BIDIRECTIONAL);
+ dma_unmap_single(&adapter->vdev->dev,
+ adapter->rx_queue.queue_dma,
+ adapter->rx_queue.queue_len,
+ DMA_BIDIRECTIONAL);
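
For context, ibmveth_cleanup() only unmaps a buffer when the earlier mapping
actually succeeded, using the one-argument dma_mapping_error() of this kernel
era, and then poisons the handle with DMA_ERROR_CODE so a later cleanup pass
will not unmap it twice. A sketch of that pattern, with illustrative names
(vdev, page, addr) rather than the driver's own:

	dma_addr_t addr;

	addr = dma_map_single(&vdev->dev, page, 4096, DMA_BIDIRECTIONAL);
	/* ... use the mapping ... */
	if (!dma_mapping_error(addr)) {		/* mapping was valid */
		dma_unmap_single(&vdev->dev, addr, 4096, DMA_BIDIRECTIONAL);
		addr = DMA_ERROR_CODE;		/* mark as already unmapped */
	}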