author     Jakub Kicinski <jakub.kicinski@netronome.com>    2018-07-30 20:43:53 -0700
committer  David S. Miller <davem@davemloft.net>            2018-07-31 09:48:21 -0700
commit     84c6b86875e01a08a0daa6fdd4a01b36bf0bf0b2 (patch)
tree       06fb904b95b6f0427a577120753713aa39050660 /net/xdp
parent     f734607e819b951bae3b436b026ec672082e9241 (diff)
xsk: don't allow umem replace at stack level
Currently drivers have to check if they already have a umem installed for a given queue and return an error if so. Make better use of XDP_QUERY_XSK_UMEM and move this functionality to the core. We need to keep rtnl across the calls now.

Signed-off-by: Jakub Kicinski <jakub.kicinski@netronome.com>
Reviewed-by: Quentin Monnet <quentin.monnet@netronome.com>
Acked-by: Björn Töpel <bjorn.topel@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
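For context, here is a minimal sketch of the driver side this change relies on: a hypothetical ndo_bpf handler answering XDP_QUERY_XSK_UMEM by reporting the umem (if any) bound to the queried queue. With that answer available, the new core helper xdp_umem_query() added in the diff below can reject a replace with -EBUSY, instead of every driver open-coding the check. Names such as my_priv, xsk_umems and my_drv_bpf are illustrative assumptions, not part of this patch.

#include <linux/netdevice.h>

/* Hypothetical per-netdev private state; not from this patch. */
struct my_priv {
	struct xdp_umem **xsk_umems;
	unsigned int num_queues;
};

static int my_drv_bpf(struct net_device *dev, struct netdev_bpf *bpf)
{
	struct my_priv *priv = netdev_priv(dev);

	switch (bpf->command) {
	case XDP_QUERY_XSK_UMEM:
		if (bpf->xsk.queue_id >= priv->num_queues)
			return -EINVAL;
		/* Report the currently bound umem (or NULL). The core's
		 * xdp_umem_query() turns a non-NULL answer into -EBUSY,
		 * so the driver no longer rejects the replace itself. */
		bpf->xsk.umem = priv->xsk_umems[bpf->xsk.queue_id];
		return 0;
	default:
		return -EINVAL;
	}
}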
Diffstat (limited to 'net/xdp')
-rw-r--r--    net/xdp/xdp_umem.c    37
1 file changed, 28 insertions(+), 9 deletions(-)
diff --git a/net/xdp/xdp_umem.c b/net/xdp/xdp_umem.c
index c199d66b5f3f..911ca6d3cb5a 100644
--- a/net/xdp/xdp_umem.c
+++ b/net/xdp/xdp_umem.c
@@ -11,6 +11,8 @@
#include <linux/slab.h>
#include <linux/bpf.h>
#include <linux/mm.h>
+#include <linux/netdevice.h>
+#include <linux/rtnetlink.h>
#include "xdp_umem.h"
#include "xsk_queue.h"
@@ -40,6 +42,21 @@ void xdp_del_sk_umem(struct xdp_umem *umem, struct xdp_sock *xs)
}
}
+int xdp_umem_query(struct net_device *dev, u16 queue_id)
+{
+ struct netdev_bpf bpf;
+
+ ASSERT_RTNL();
+
+ memset(&bpf, 0, sizeof(bpf));
+ bpf.command = XDP_QUERY_XSK_UMEM;
+ bpf.xsk.queue_id = queue_id;
+
+ if (!dev->netdev_ops->ndo_bpf)
+ return 0;
+ return dev->netdev_ops->ndo_bpf(dev, &bpf) ?: !!bpf.xsk.umem;
+}
+
int xdp_umem_assign_dev(struct xdp_umem *umem, struct net_device *dev,
u32 queue_id, u16 flags)
{
@@ -62,28 +79,30 @@ int xdp_umem_assign_dev(struct xdp_umem *umem, struct net_device *dev,
bpf.command = XDP_QUERY_XSK_UMEM;
rtnl_lock();
- err = dev->netdev_ops->ndo_bpf(dev, &bpf);
- rtnl_unlock();
-
- if (err)
- return force_zc ? -ENOTSUPP : 0;
+ err = xdp_umem_query(dev, queue_id);
+ if (err) {
+ err = err < 0 ? -ENOTSUPP : -EBUSY;
+ goto err_rtnl_unlock;
+ }
bpf.command = XDP_SETUP_XSK_UMEM;
bpf.xsk.umem = umem;
bpf.xsk.queue_id = queue_id;
- rtnl_lock();
err = dev->netdev_ops->ndo_bpf(dev, &bpf);
- rtnl_unlock();
-
if (err)
- return force_zc ? err : 0; /* fail or fallback */
+ goto err_rtnl_unlock;
+ rtnl_unlock();
dev_hold(dev);
umem->dev = dev;
umem->queue_id = queue_id;
umem->zc = true;
return 0;
+
+err_rtnl_unlock:
+ rtnl_unlock();
+ return force_zc ? err : 0; /* fail or fallback */
}
static void xdp_umem_clear_dev(struct xdp_umem *umem)