mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
synced 2026-05-10 18:40:25 -04:00
octeontx2-pf: use xdp_return_frame() to free xdp buffers
xdp_return_frame() frees the xdp frames and returns their associated pages back to the page pool. Signed-off-by: Geetha sowjanya <gakula@marvell.com> Signed-off-by: Suman Ghosh <sumang@marvell.com> Signed-off-by: Paolo Abeni <pabeni@redhat.com>
This commit is contained in:
@@ -21,6 +21,7 @@
|
||||
#include <linux/time64.h>
|
||||
#include <linux/dim.h>
|
||||
#include <uapi/linux/if_macsec.h>
|
||||
#include <net/page_pool/helpers.h>
|
||||
|
||||
#include <mbox.h>
|
||||
#include <npc.h>
|
||||
@@ -1094,7 +1095,8 @@ int otx2_del_macfilter(struct net_device *netdev, const u8 *mac);
|
||||
int otx2_add_macfilter(struct net_device *netdev, const u8 *mac);
|
||||
int otx2_enable_rxvlan(struct otx2_nic *pf, bool enable);
|
||||
int otx2_install_rxvlan_offload_flow(struct otx2_nic *pfvf);
|
||||
bool otx2_xdp_sq_append_pkt(struct otx2_nic *pfvf, u64 iova, int len, u16 qidx);
|
||||
bool otx2_xdp_sq_append_pkt(struct otx2_nic *pfvf, struct xdp_frame *xdpf,
|
||||
u64 iova, int len, u16 qidx, u16 flags);
|
||||
u16 otx2_get_max_mtu(struct otx2_nic *pfvf);
|
||||
int otx2_handle_ntuple_tc_features(struct net_device *netdev,
|
||||
netdev_features_t features);
|
||||
|
||||
@@ -2691,7 +2691,6 @@ static int otx2_get_vf_config(struct net_device *netdev, int vf,
|
||||
static int otx2_xdp_xmit_tx(struct otx2_nic *pf, struct xdp_frame *xdpf,
|
||||
int qidx)
|
||||
{
|
||||
struct page *page;
|
||||
u64 dma_addr;
|
||||
int err = 0;
|
||||
|
||||
@@ -2701,11 +2700,11 @@ static int otx2_xdp_xmit_tx(struct otx2_nic *pf, struct xdp_frame *xdpf,
|
||||
if (dma_mapping_error(pf->dev, dma_addr))
|
||||
return -ENOMEM;
|
||||
|
||||
err = otx2_xdp_sq_append_pkt(pf, dma_addr, xdpf->len, qidx);
|
||||
err = otx2_xdp_sq_append_pkt(pf, xdpf, dma_addr, xdpf->len,
|
||||
qidx, XDP_REDIRECT);
|
||||
if (!err) {
|
||||
otx2_dma_unmap_page(pf, dma_addr, xdpf->len, DMA_TO_DEVICE);
|
||||
page = virt_to_page(xdpf->data);
|
||||
put_page(page);
|
||||
xdp_return_frame(xdpf);
|
||||
return -ENOMEM;
|
||||
}
|
||||
return 0;
|
||||
|
||||
@@ -96,20 +96,16 @@ static unsigned int frag_num(unsigned int i)
|
||||
|
||||
static void otx2_xdp_snd_pkt_handler(struct otx2_nic *pfvf,
|
||||
struct otx2_snd_queue *sq,
|
||||
struct nix_cqe_tx_s *cqe)
|
||||
struct nix_cqe_tx_s *cqe)
|
||||
{
|
||||
struct nix_send_comp_s *snd_comp = &cqe->comp;
|
||||
struct sg_list *sg;
|
||||
struct page *page;
|
||||
u64 pa;
|
||||
|
||||
sg = &sq->sg[snd_comp->sqe_id];
|
||||
|
||||
pa = otx2_iova_to_phys(pfvf->iommu_domain, sg->dma_addr[0]);
|
||||
otx2_dma_unmap_page(pfvf, sg->dma_addr[0],
|
||||
sg->size[0], DMA_TO_DEVICE);
|
||||
page = virt_to_page(phys_to_virt(pa));
|
||||
put_page(page);
|
||||
if (sg->flags & XDP_REDIRECT)
|
||||
otx2_dma_unmap_page(pfvf, sg->dma_addr[0], sg->size[0], DMA_TO_DEVICE);
|
||||
xdp_return_frame((struct xdp_frame *)sg->skb);
|
||||
sg->skb = (u64)NULL;
|
||||
}
|
||||
|
||||
static void otx2_snd_pkt_handler(struct otx2_nic *pfvf,
|
||||
@@ -1359,8 +1355,9 @@ void otx2_free_pending_sqe(struct otx2_nic *pfvf)
|
||||
}
|
||||
}
|
||||
|
||||
static void otx2_xdp_sqe_add_sg(struct otx2_snd_queue *sq, u64 dma_addr,
|
||||
int len, int *offset)
|
||||
static void otx2_xdp_sqe_add_sg(struct otx2_snd_queue *sq,
|
||||
struct xdp_frame *xdpf,
|
||||
u64 dma_addr, int len, int *offset, u16 flags)
|
||||
{
|
||||
struct nix_sqe_sg_s *sg = NULL;
|
||||
u64 *iova = NULL;
|
||||
@@ -1377,9 +1374,12 @@ static void otx2_xdp_sqe_add_sg(struct otx2_snd_queue *sq, u64 dma_addr,
|
||||
sq->sg[sq->head].dma_addr[0] = dma_addr;
|
||||
sq->sg[sq->head].size[0] = len;
|
||||
sq->sg[sq->head].num_segs = 1;
|
||||
sq->sg[sq->head].flags = flags;
|
||||
sq->sg[sq->head].skb = (u64)xdpf;
|
||||
}
|
||||
|
||||
bool otx2_xdp_sq_append_pkt(struct otx2_nic *pfvf, u64 iova, int len, u16 qidx)
|
||||
bool otx2_xdp_sq_append_pkt(struct otx2_nic *pfvf, struct xdp_frame *xdpf,
|
||||
u64 iova, int len, u16 qidx, u16 flags)
|
||||
{
|
||||
struct nix_sqe_hdr_s *sqe_hdr;
|
||||
struct otx2_snd_queue *sq;
|
||||
@@ -1405,7 +1405,7 @@ bool otx2_xdp_sq_append_pkt(struct otx2_nic *pfvf, u64 iova, int len, u16 qidx)
|
||||
|
||||
offset = sizeof(*sqe_hdr);
|
||||
|
||||
otx2_xdp_sqe_add_sg(sq, iova, len, &offset);
|
||||
otx2_xdp_sqe_add_sg(sq, xdpf, iova, len, &offset, flags);
|
||||
sqe_hdr->sizem1 = (offset / 16) - 1;
|
||||
pfvf->hw_ops->sqe_flush(pfvf, sq, offset, qidx);
|
||||
|
||||
@@ -1419,6 +1419,8 @@ static bool otx2_xdp_rcv_pkt_handler(struct otx2_nic *pfvf,
|
||||
bool *need_xdp_flush)
|
||||
{
|
||||
unsigned char *hard_start;
|
||||
struct otx2_pool *pool;
|
||||
struct xdp_frame *xdpf;
|
||||
int qidx = cq->cq_idx;
|
||||
struct xdp_buff xdp;
|
||||
struct page *page;
|
||||
@@ -1426,6 +1428,7 @@ static bool otx2_xdp_rcv_pkt_handler(struct otx2_nic *pfvf,
|
||||
u32 act;
|
||||
int err;
|
||||
|
||||
pool = &pfvf->qset.pool[qidx];
|
||||
iova = cqe->sg.seg_addr - OTX2_HEAD_ROOM;
|
||||
pa = otx2_iova_to_phys(pfvf->iommu_domain, iova);
|
||||
page = virt_to_page(phys_to_virt(pa));
|
||||
@@ -1444,19 +1447,21 @@ static bool otx2_xdp_rcv_pkt_handler(struct otx2_nic *pfvf,
|
||||
case XDP_TX:
|
||||
qidx += pfvf->hw.tx_queues;
|
||||
cq->pool_ptrs++;
|
||||
return otx2_xdp_sq_append_pkt(pfvf, iova,
|
||||
cqe->sg.seg_size, qidx);
|
||||
xdpf = xdp_convert_buff_to_frame(&xdp);
|
||||
return otx2_xdp_sq_append_pkt(pfvf, xdpf, cqe->sg.seg_addr,
|
||||
cqe->sg.seg_size, qidx, XDP_TX);
|
||||
case XDP_REDIRECT:
|
||||
cq->pool_ptrs++;
|
||||
err = xdp_do_redirect(pfvf->netdev, &xdp, prog);
|
||||
|
||||
otx2_dma_unmap_page(pfvf, iova, pfvf->rbsize,
|
||||
DMA_FROM_DEVICE);
|
||||
if (!err) {
|
||||
*need_xdp_flush = true;
|
||||
return true;
|
||||
}
|
||||
put_page(page);
|
||||
|
||||
otx2_dma_unmap_page(pfvf, iova, pfvf->rbsize,
|
||||
DMA_FROM_DEVICE);
|
||||
xdpf = xdp_convert_buff_to_frame(&xdp);
|
||||
xdp_return_frame(xdpf);
|
||||
break;
|
||||
default:
|
||||
bpf_warn_invalid_xdp_action(pfvf->netdev, prog, act);
|
||||
@@ -1465,10 +1470,14 @@ static bool otx2_xdp_rcv_pkt_handler(struct otx2_nic *pfvf,
|
||||
trace_xdp_exception(pfvf->netdev, prog, act);
|
||||
break;
|
||||
case XDP_DROP:
|
||||
otx2_dma_unmap_page(pfvf, iova, pfvf->rbsize,
|
||||
DMA_FROM_DEVICE);
|
||||
put_page(page);
|
||||
cq->pool_ptrs++;
|
||||
if (page->pp) {
|
||||
page_pool_recycle_direct(pool->page_pool, page);
|
||||
} else {
|
||||
otx2_dma_unmap_page(pfvf, iova, pfvf->rbsize,
|
||||
DMA_FROM_DEVICE);
|
||||
put_page(page);
|
||||
}
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
|
||||
@@ -76,6 +76,7 @@ struct otx2_rcv_queue {
|
||||
|
||||
struct sg_list {
|
||||
u16 num_segs;
|
||||
u16 flags;
|
||||
u64 skb;
|
||||
u64 size[OTX2_MAX_FRAGS_IN_SQE];
|
||||
u64 dma_addr[OTX2_MAX_FRAGS_IN_SQE];
|
||||
|
||||
Reference in New Issue
Block a user