bnxt_en: Let the page pool manage the DMA mapping
Use the page pool's ability to maintain DMA mappings for us.
This avoids re-mapping of the recycled pages.

Link: https://lore.kernel.org/netdev/20230728231829.235716-4-michael.chan@broadcom.com/
Signed-off-by: Somnath Kotur <somnath.kotur@broadcom.com>
Signed-off-by: Michael Chan <michael.chan@broadcom.com>
Link: https://lore.kernel.org/r/20230817231911.165035-3-michael.chan@broadcom.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
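
To illustrate the pattern this patch adopts, here is a minimal, hypothetical sketch (not bnxt code; names such as my_rx_ring, my_rx_ring_init and my_alloc_rx_page are invented for illustration): the pool is created with PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV so it maps each page once and re-syncs it for the device when the page is recycled; the driver then reads the existing mapping back with page_pool_get_dma_addr() at allocation time and only does dma_sync_single_for_cpu() on completion instead of unmapping.

/*
 * Minimal sketch, not bnxt code: an RX ring whose page pool owns the DMA
 * mapping.  All my_* names are invented.  Around v6.5 the helpers live in
 * <net/page_pool.h>; newer kernels split them into <net/page_pool/helpers.h>.
 */
#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <net/page_pool.h>

struct my_rx_ring {
        struct page_pool *page_pool;
};

static int my_rx_ring_init(struct my_rx_ring *rxr, struct device *dev,
                           unsigned int ring_size)
{
        struct page_pool_params pp = { 0 };

        pp.pool_size = ring_size;
        pp.nid = dev_to_node(dev);
        pp.dev = dev;
        /* Map pages once in the pool and re-sync them for the device on
         * recycle, instead of dma_map/dma_unmap per buffer in the driver.
         */
        pp.dma_dir = DMA_FROM_DEVICE;
        pp.max_len = PAGE_SIZE;
        pp.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;

        rxr->page_pool = page_pool_create(&pp);
        if (IS_ERR(rxr->page_pool)) {
                int err = PTR_ERR(rxr->page_pool);

                rxr->page_pool = NULL;
                return err;
        }
        return 0;
}

/* Allocation: the page is already mapped, just read the address back. */
static struct page *my_alloc_rx_page(struct my_rx_ring *rxr, dma_addr_t *mapping)
{
        struct page *page = page_pool_dev_alloc_pages(rxr->page_pool);

        if (!page)
                return NULL;
        *mapping = page_pool_get_dma_addr(page);
        return page;
}

/* Completion: sync for the CPU instead of unmapping; the recycled page
 * keeps its mapping for the next allocation.
 */
static void my_rx_complete(struct my_rx_ring *rxr, struct device *dev,
                           struct page *page, dma_addr_t mapping,
                           unsigned int len)
{
        dma_sync_single_for_cpu(dev, mapping, len, DMA_FROM_DEVICE);
        /* ... consume the data or build an skb from the page here ... */
        page_pool_recycle_direct(rxr->page_pool, page);
}

In the patch below the same pattern appears with bp->rx_dir as the DMA direction and BNXT_RX_PAGE_SIZE as the sync length.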
skotur-brcm authored and kuba-moo committed Aug 19, 2023
1 parent 86b0550 commit 578fcfd
Showing 1 changed file with 10 additions and 22 deletions.
drivers/net/ethernet/broadcom/bnxt/bnxt.c: 10 additions & 22 deletions
@@ -761,7 +761,6 @@ static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping,
 					 unsigned int *offset,
 					 gfp_t gfp)
 {
-	struct device *dev = &bp->pdev->dev;
 	struct page *page;
 
 	if (PAGE_SIZE > BNXT_RX_PAGE_SIZE) {
@@ -774,12 +773,7 @@ static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping,
 	if (!page)
 		return NULL;
 
-	*mapping = dma_map_page_attrs(dev, page, *offset, BNXT_RX_PAGE_SIZE,
-				      bp->rx_dir, DMA_ATTR_WEAK_ORDERING);
-	if (dma_mapping_error(dev, *mapping)) {
-		page_pool_recycle_direct(rxr->page_pool, page);
-		return NULL;
-	}
+	*mapping = page_pool_get_dma_addr(page) + *offset;
 	return page;
 }

@@ -998,8 +992,8 @@ static struct sk_buff *bnxt_rx_multi_page_skb(struct bnxt *bp,
 		return NULL;
 	}
 	dma_addr -= bp->rx_dma_offset;
-	dma_unmap_page_attrs(&bp->pdev->dev, dma_addr, BNXT_RX_PAGE_SIZE,
-			     bp->rx_dir, DMA_ATTR_WEAK_ORDERING);
+	dma_sync_single_for_cpu(&bp->pdev->dev, dma_addr, BNXT_RX_PAGE_SIZE,
+				bp->rx_dir);
 	skb = build_skb(data_ptr - bp->rx_offset, BNXT_RX_PAGE_SIZE);
 	if (!skb) {
 		page_pool_recycle_direct(rxr->page_pool, page);
@@ -1032,8 +1026,8 @@ static struct sk_buff *bnxt_rx_page_skb(struct bnxt *bp,
 		return NULL;
 	}
 	dma_addr -= bp->rx_dma_offset;
-	dma_unmap_page_attrs(&bp->pdev->dev, dma_addr, BNXT_RX_PAGE_SIZE,
-			     bp->rx_dir, DMA_ATTR_WEAK_ORDERING);
+	dma_sync_single_for_cpu(&bp->pdev->dev, dma_addr, BNXT_RX_PAGE_SIZE,
+				bp->rx_dir);
 
 	if (unlikely(!payload))
 		payload = eth_get_headlen(bp->dev, data_ptr, len);
@@ -1149,9 +1143,8 @@ static u32 __bnxt_rx_agg_pages(struct bnxt *bp,
 			return 0;
 		}
 
-		dma_unmap_page_attrs(&pdev->dev, mapping, BNXT_RX_PAGE_SIZE,
-				     bp->rx_dir,
-				     DMA_ATTR_WEAK_ORDERING);
+		dma_sync_single_for_cpu(&pdev->dev, mapping, BNXT_RX_PAGE_SIZE,
+					bp->rx_dir);
 
 		total_frag_len += frag_len;
 		prod = NEXT_RX_AGG(prod);
@@ -2947,10 +2940,6 @@ static void bnxt_free_one_rx_ring_skbs(struct bnxt *bp, int ring_nr)
 
 		rx_buf->data = NULL;
 		if (BNXT_RX_PAGE_MODE(bp)) {
-			mapping -= bp->rx_dma_offset;
-			dma_unmap_page_attrs(&pdev->dev, mapping,
-					     BNXT_RX_PAGE_SIZE, bp->rx_dir,
-					     DMA_ATTR_WEAK_ORDERING);
 			page_pool_recycle_direct(rxr->page_pool, data);
 		} else {
 			dma_unmap_single_attrs(&pdev->dev, mapping,
@@ -2971,9 +2960,6 @@ static void bnxt_free_one_rx_ring_skbs(struct bnxt *bp, int ring_nr)
 		if (!page)
 			continue;
 
-		dma_unmap_page_attrs(&pdev->dev, rx_agg_buf->mapping,
-				     BNXT_RX_PAGE_SIZE, bp->rx_dir,
-				     DMA_ATTR_WEAK_ORDERING);
 		rx_agg_buf->page = NULL;
 		__clear_bit(i, rxr->rx_agg_bmap);

@@ -3205,7 +3191,9 @@ static int bnxt_alloc_rx_page_pool(struct bnxt *bp,
 	pp.nid = dev_to_node(&bp->pdev->dev);
 	pp.napi = &rxr->bnapi->napi;
 	pp.dev = &bp->pdev->dev;
-	pp.dma_dir = DMA_BIDIRECTIONAL;
+	pp.dma_dir = bp->rx_dir;
+	pp.max_len = PAGE_SIZE;
+	pp.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
 	if (PAGE_SIZE > BNXT_RX_PAGE_SIZE)
 		pp.flags |= PP_FLAG_PAGE_FRAG;

