Diffstat (limited to 'tools')
-rw-r--r--  tools/bpf/Makefile.helpers | 59
-rw-r--r--  tools/bpf/bpftool/Documentation/Makefile | 13
-rw-r--r--  tools/bpf/bpftool/Documentation/bpftool-cgroup.rst | 12
-rw-r--r--  tools/bpf/bpftool/Documentation/bpftool-prog.rst | 33
-rw-r--r--  tools/bpf/bpftool/Makefile | 10
-rw-r--r--  tools/bpf/bpftool/bash-completion/bpftool | 134
-rw-r--r--  tools/bpf/bpftool/btf_dumper.c | 251
-rw-r--r--  tools/bpf/bpftool/cgroup.c | 170
-rw-r--r--  tools/bpf/bpftool/common.c | 2
-rw-r--r--  tools/bpf/bpftool/main.c | 4
-rw-r--r--  tools/bpf/bpftool/main.h | 36
-rw-r--r--  tools/bpf/bpftool/map.c | 223
-rw-r--r--  tools/bpf/bpftool/prog.c | 249
-rw-r--r--  tools/bpf/bpftool/xlated_dumper.c | 6
-rw-r--r--  tools/build/Makefile.feature | 1
-rw-r--r--  tools/build/feature/Makefile | 4
-rw-r--r--  tools/build/feature/test-reallocarray.c | 8
-rw-r--r--  tools/include/linux/compiler-gcc.h | 4
-rw-r--r--  tools/include/linux/overflow.h | 278
-rw-r--r--  tools/include/tools/libc_compat.h | 20
-rw-r--r--  tools/include/uapi/linux/bpf.h | 35
-rw-r--r--  tools/lib/bpf/Build | 2
-rw-r--r--  tools/lib/bpf/Makefile | 6
-rw-r--r--  tools/lib/bpf/btf.c | 44
-rw-r--r--  tools/lib/bpf/btf.h | 3
-rw-r--r--  tools/lib/bpf/libbpf.c | 237
-rw-r--r--  tools/lib/bpf/libbpf.h | 13
-rw-r--r--  tools/lib/bpf/libbpf_errno.c | 74
-rw-r--r--  tools/testing/selftests/bpf/Makefile | 1
-rw-r--r--  tools/testing/selftests/bpf/cgroup_helpers.c | 6
-rw-r--r--  tools/testing/selftests/bpf/cgroup_helpers.h | 6
-rwxr-xr-x  tools/testing/selftests/bpf/test_offload.py | 232
-rw-r--r--  tools/testing/selftests/bpf/test_sock_addr.c | 37
-rw-r--r--  tools/testing/selftests/bpf/test_tcpbpf.h | 1
-rw-r--r--  tools/testing/selftests/bpf/test_tcpbpf_kern.c | 17
-rw-r--r--  tools/testing/selftests/bpf/test_tcpbpf_user.c | 119
-rw-r--r--  tools/testing/selftests/bpf/trace_helpers.c | 48
-rw-r--r--  tools/testing/selftests/bpf/trace_helpers.h | 4
-rwxr-xr-x  tools/testing/selftests/drivers/net/mlxsw/mirror_gre.sh | 217
-rw-r--r--  tools/testing/selftests/drivers/net/mlxsw/mirror_gre_scale.sh | 197
-rwxr-xr-x  tools/testing/selftests/drivers/net/mlxsw/qos_dscp_bridge.sh | 189
-rwxr-xr-x  tools/testing/selftests/drivers/net/mlxsw/qos_dscp_router.sh | 233
-rw-r--r--  tools/testing/selftests/drivers/net/mlxsw/router_scale.sh | 167
-rw-r--r--  tools/testing/selftests/drivers/net/mlxsw/spectrum/devlink_lib_spectrum.sh | 119
-rwxr-xr-x  tools/testing/selftests/drivers/net/mlxsw/spectrum/devlink_resources.sh | 117
-rw-r--r--  tools/testing/selftests/drivers/net/mlxsw/spectrum/mirror_gre_scale.sh | 13
-rwxr-xr-x  tools/testing/selftests/drivers/net/mlxsw/spectrum/resource_scale.sh | 55
-rw-r--r--  tools/testing/selftests/drivers/net/mlxsw/spectrum/router_scale.sh | 18
-rw-r--r--  tools/testing/selftests/drivers/net/mlxsw/spectrum/tc_flower_scale.sh | 19
-rw-r--r--  tools/testing/selftests/drivers/net/mlxsw/tc_flower_scale.sh | 134
-rw-r--r--  tools/testing/selftests/net/.gitignore | 1
-rw-r--r--  tools/testing/selftests/net/Makefile | 2
-rw-r--r--  tools/testing/selftests/net/forwarding/README | 2
-rwxr-xr-x  tools/testing/selftests/net/forwarding/bridge_port_isolation.sh | 151
-rw-r--r--  tools/testing/selftests/net/forwarding/devlink_lib.sh | 108
-rwxr-xr-x  tools/testing/selftests/net/forwarding/gre_multipath.sh | 253
-rw-r--r--  tools/testing/selftests/net/forwarding/lib.sh | 291
-rwxr-xr-x  tools/testing/selftests/net/forwarding/mirror_gre_bridge_1d.sh | 132
-rwxr-xr-x  tools/testing/selftests/net/forwarding/mirror_gre_bridge_1d_vlan.sh | 6
-rwxr-xr-x  tools/testing/selftests/net/forwarding/mirror_gre_bridge_1q.sh | 126
-rwxr-xr-x  tools/testing/selftests/net/forwarding/mirror_gre_bridge_1q_lag.sh | 283
-rwxr-xr-x  tools/testing/selftests/net/forwarding/mirror_gre_changes.sh | 11
-rwxr-xr-x  tools/testing/selftests/net/forwarding/mirror_gre_lag_lacp.sh | 285
-rw-r--r--  tools/testing/selftests/net/forwarding/mirror_gre_lib.sh | 4
-rwxr-xr-x  tools/testing/selftests/net/forwarding/mirror_gre_nh.sh | 4
-rw-r--r--  tools/testing/selftests/net/forwarding/mirror_gre_topo_lib.sh | 2
-rwxr-xr-x  tools/testing/selftests/net/forwarding/mirror_gre_vlan_bridge_1q.sh | 21
-rw-r--r--  tools/testing/selftests/net/forwarding/mirror_lib.sh | 2
-rwxr-xr-x  tools/testing/selftests/net/forwarding/router_bridge.sh | 113
-rwxr-xr-x  tools/testing/selftests/net/forwarding/router_bridge_vlan.sh | 132
-rwxr-xr-x  tools/testing/selftests/net/forwarding/router_broadcast.sh | 233
-rwxr-xr-x  tools/testing/selftests/net/forwarding/router_multipath.sh | 39
-rwxr-xr-x  tools/testing/selftests/net/forwarding/tc_chains.sh | 86
-rwxr-xr-x  tools/testing/selftests/net/forwarding/tc_shblocks.sh | 2
-rwxr-xr-x  tools/testing/selftests/net/ip6_gre_headroom.sh | 65
-rwxr-xr-x  tools/testing/selftests/net/rtnetlink.sh | 128
-rw-r--r--  tools/testing/selftests/net/tls.c | 692
-rw-r--r--  tools/testing/selftests/tc-testing/tc-tests/actions/csum.json | 24
-rw-r--r--  tools/testing/selftests/tc-testing/tc-tests/actions/tunnel_key.json | 917
-rw-r--r--  tools/testing/selftests/tc-testing/tc-tests/filters/fw.json | 1049
80 files changed, 8667 insertions, 377 deletions
diff --git a/tools/bpf/Makefile.helpers b/tools/bpf/Makefile.helpers
new file mode 100644
index 000000000000..c34fea77f39f
--- /dev/null
+++ b/tools/bpf/Makefile.helpers
@@ -0,0 +1,59 @@
+ifndef allow-override
+ include ../scripts/Makefile.include
+ include ../scripts/utilities.mak
+else
+ # Assume Makefile.helpers is being run from bpftool/Documentation
+ # subdirectory. Go up two more directories to fetch bpf.h header and
+ # associated script.
+ UP2DIR := ../../
+endif
+
+INSTALL ?= install
+RM ?= rm -f
+RMDIR ?= rmdir --ignore-fail-on-non-empty
+
+ifeq ($(V),1)
+ Q =
+else
+ Q = @
+endif
+
+prefix ?= /usr/local
+mandir ?= $(prefix)/man
+man7dir = $(mandir)/man7
+
+HELPERS_RST = bpf-helpers.rst
+MAN7_RST = $(HELPERS_RST)
+
+_DOC_MAN7 = $(patsubst %.rst,%.7,$(MAN7_RST))
+DOC_MAN7 = $(addprefix $(OUTPUT),$(_DOC_MAN7))
+
+helpers: man7
+man7: $(DOC_MAN7)
+
+RST2MAN_DEP := $(shell command -v rst2man 2>/dev/null)
+
+$(OUTPUT)$(HELPERS_RST): $(UP2DIR)../../include/uapi/linux/bpf.h
+ $(QUIET_GEN)$(UP2DIR)../../scripts/bpf_helpers_doc.py --filename $< > $@
+
+$(OUTPUT)%.7: $(OUTPUT)%.rst
+ifndef RST2MAN_DEP
+ $(error "rst2man not found, but required to generate man pages")
+endif
+ $(QUIET_GEN)rst2man $< > $@
+
+helpers-clean:
+ $(call QUIET_CLEAN, eBPF_helpers-manpage)
+ $(Q)$(RM) $(DOC_MAN7) $(OUTPUT)$(HELPERS_RST)
+
+helpers-install: helpers
+ $(call QUIET_INSTALL, eBPF_helpers-manpage)
+ $(Q)$(INSTALL) -d -m 755 $(DESTDIR)$(man7dir)
+ $(Q)$(INSTALL) -m 644 $(DOC_MAN7) $(DESTDIR)$(man7dir)
+
+helpers-uninstall:
+ $(call QUIET_UNINST, eBPF_helpers-manpage)
+ $(Q)$(RM) $(addprefix $(DESTDIR)$(man7dir)/,$(_DOC_MAN7))
+ $(Q)$(RMDIR) $(DESTDIR)$(man7dir)
+
+.PHONY: helpers helpers-clean helpers-install helpers-uninstall
diff --git a/tools/bpf/bpftool/Documentation/Makefile b/tools/bpf/bpftool/Documentation/Makefile
index a9d47c1558bb..f7663a3e60c9 100644
--- a/tools/bpf/bpftool/Documentation/Makefile
+++ b/tools/bpf/bpftool/Documentation/Makefile
@@ -15,12 +15,15 @@ prefix ?= /usr/local
mandir ?= $(prefix)/man
man8dir = $(mandir)/man8
-MAN8_RST = $(wildcard *.rst)
+# Load targets for building eBPF helpers man page.
+include ../../Makefile.helpers
+
+MAN8_RST = $(filter-out $(HELPERS_RST),$(wildcard *.rst))
_DOC_MAN8 = $(patsubst %.rst,%.8,$(MAN8_RST))
DOC_MAN8 = $(addprefix $(OUTPUT),$(_DOC_MAN8))
-man: man8
+man: man8 helpers
man8: $(DOC_MAN8)
RST2MAN_DEP := $(shell command -v rst2man 2>/dev/null)
@@ -31,16 +34,16 @@ ifndef RST2MAN_DEP
endif
$(QUIET_GEN)rst2man $< > $@
-clean:
+clean: helpers-clean
$(call QUIET_CLEAN, Documentation)
$(Q)$(RM) $(DOC_MAN8)
-install: man
+install: man helpers-install
$(call QUIET_INSTALL, Documentation-man)
$(Q)$(INSTALL) -d -m 755 $(DESTDIR)$(man8dir)
$(Q)$(INSTALL) -m 644 $(DOC_MAN8) $(DESTDIR)$(man8dir)
-uninstall:
+uninstall: helpers-uninstall
$(call QUIET_UNINST, Documentation-man)
$(Q)$(RM) $(addprefix $(DESTDIR)$(man8dir)/,$(_DOC_MAN8))
$(Q)$(RMDIR) $(DESTDIR)$(man8dir)
diff --git a/tools/bpf/bpftool/Documentation/bpftool-cgroup.rst b/tools/bpf/bpftool/Documentation/bpftool-cgroup.rst
index 7b0e6d453e92..edbe81534c6d 100644
--- a/tools/bpf/bpftool/Documentation/bpftool-cgroup.rst
+++ b/tools/bpf/bpftool/Documentation/bpftool-cgroup.rst
@@ -15,12 +15,13 @@ SYNOPSIS
*OPTIONS* := { { **-j** | **--json** } [{ **-p** | **--pretty** }] | { **-f** | **--bpffs** } }
*COMMANDS* :=
- { **show** | **list** | **attach** | **detach** | **help** }
+ { **show** | **list** | **tree** | **attach** | **detach** | **help** }
MAP COMMANDS
=============
| **bpftool** **cgroup { show | list }** *CGROUP*
+| **bpftool** **cgroup tree** [*CGROUP_ROOT*]
| **bpftool** **cgroup attach** *CGROUP* *ATTACH_TYPE* *PROG* [*ATTACH_FLAGS*]
| **bpftool** **cgroup detach** *CGROUP* *ATTACH_TYPE* *PROG*
| **bpftool** **cgroup help**
@@ -39,6 +40,15 @@ DESCRIPTION
Output will start with program ID followed by attach type,
attach flags and program name.
+ **bpftool cgroup tree** [*CGROUP_ROOT*]
+ Iterate over all cgroups in *CGROUP_ROOT* and list all
+ attached programs. If *CGROUP_ROOT* is not specified,
+ bpftool uses the cgroup v2 mountpoint.
+
+ The output is similar to that of the cgroup show/list
+ commands: it starts with the absolute cgroup path, followed
+ by the program ID, attach type, attach flags and program name.
+
**bpftool cgroup attach** *CGROUP* *ATTACH_TYPE* *PROG* [*ATTACH_FLAGS*]
Attach program *PROG* to the cgroup *CGROUP* with attach type
*ATTACH_TYPE* and optional *ATTACH_FLAGS*.
diff --git a/tools/bpf/bpftool/Documentation/bpftool-prog.rst b/tools/bpf/bpftool/Documentation/bpftool-prog.rst
index 43d34a5c3ec5..64156a16d530 100644
--- a/tools/bpf/bpftool/Documentation/bpftool-prog.rst
+++ b/tools/bpf/bpftool/Documentation/bpftool-prog.rst
@@ -24,10 +24,20 @@ MAP COMMANDS
| **bpftool** **prog dump xlated** *PROG* [{**file** *FILE* | **opcodes** | **visual**}]
| **bpftool** **prog dump jited** *PROG* [{**file** *FILE* | **opcodes**}]
| **bpftool** **prog pin** *PROG* *FILE*
-| **bpftool** **prog load** *OBJ* *FILE*
+| **bpftool** **prog load** *OBJ* *FILE* [**type** *TYPE*] [**map** {**idx** *IDX* | **name** *NAME*} *MAP*] [**dev** *NAME*]
| **bpftool** **prog help**
|
+| *MAP* := { **id** *MAP_ID* | **pinned** *FILE* }
| *PROG* := { **id** *PROG_ID* | **pinned** *FILE* | **tag** *PROG_TAG* }
+| *TYPE* := {
+| **socket** | **kprobe** | **kretprobe** | **classifier** | **action** |
+| **tracepoint** | **raw_tracepoint** | **xdp** | **perf_event** | **cgroup/skb** |
+| **cgroup/sock** | **cgroup/dev** | **lwt_in** | **lwt_out** | **lwt_xmit** |
+| **lwt_seg6local** | **sockops** | **sk_skb** | **sk_msg** | **lirc_mode2** |
+| **cgroup/bind4** | **cgroup/bind6** | **cgroup/post_bind4** | **cgroup/post_bind6** |
+| **cgroup/connect4** | **cgroup/connect6** | **cgroup/sendmsg4** | **cgroup/sendmsg6**
+| }
+
DESCRIPTION
===========
@@ -64,8 +74,19 @@ DESCRIPTION
Note: *FILE* must be located in *bpffs* mount.
- **bpftool prog load** *OBJ* *FILE*
+ **bpftool prog load** *OBJ* *FILE* [**type** *TYPE*] [**map** {**idx** *IDX* | **name** *NAME*} *MAP*] [**dev** *NAME*]
Load bpf program from binary *OBJ* and pin as *FILE*.
+ **type** is optional; if it is not specified, the program
+ type will be inferred from section names.
+ By default bpftool will create new maps as declared in the ELF
+ object being loaded. The **map** parameter allows for the reuse
+ of existing maps. It can be specified multiple times, each
+ time for a different map. *IDX* refers to the index of the map
+ to be replaced in the ELF file, counting from 0, while *NAME*
+ allows a map to be replaced by name. *MAP* specifies the map to
+ use, referring to it by **id** or through a **pinned** file.
+ If **dev** *NAME* is specified, the program will be loaded onto
+ the given networking device (offload).
Note: *FILE* must be located in *bpffs* mount.
@@ -159,6 +180,14 @@ EXAMPLES
mov %rbx,0x0(%rbp)
48 89 5d 00
+|
+| **# bpftool prog load xdp1_kern.o /sys/fs/bpf/xdp1 type xdp map name rxcnt id 7**
+| **# bpftool prog show pinned /sys/fs/bpf/xdp1**
+| 9: xdp name xdp_prog1 tag 539ec6ce11b52f98 gpl
+| loaded_at 2018-06-25T16:17:31-0700 uid 0
+| xlated 488B jited 336B memlock 4096B map_ids 7
+| **# rm /sys/fs/bpf/xdp1**
+|
SEE ALSO
========
diff --git a/tools/bpf/bpftool/Makefile b/tools/bpf/bpftool/Makefile
index 892dbf095bff..74288a2197ab 100644
--- a/tools/bpf/bpftool/Makefile
+++ b/tools/bpf/bpftool/Makefile
@@ -23,10 +23,10 @@ endif
LIBBPF = $(BPF_PATH)libbpf.a
-BPFTOOL_VERSION=$(shell make --no-print-directory -sC ../../.. kernelversion)
+BPFTOOL_VERSION := $(shell make --no-print-directory -sC ../../.. kernelversion)
$(LIBBPF): FORCE
- $(Q)$(MAKE) -C $(BPF_DIR) OUTPUT=$(OUTPUT) $(OUTPUT)libbpf.a FEATURES_DUMP=$(FEATURE_DUMP_EXPORT)
+ $(Q)$(MAKE) -C $(BPF_DIR) OUTPUT=$(OUTPUT) $(OUTPUT)libbpf.a
$(LIBBPF)-clean:
$(call QUIET_CLEAN, libbpf)
@@ -52,7 +52,7 @@ INSTALL ?= install
RM ?= rm -f
FEATURE_USER = .bpftool
-FEATURE_TESTS = libbfd disassembler-four-args
+FEATURE_TESTS = libbfd disassembler-four-args reallocarray
FEATURE_DISPLAY = libbfd disassembler-four-args
check_feat := 1
@@ -75,6 +75,10 @@ ifeq ($(feature-disassembler-four-args), 1)
CFLAGS += -DDISASM_FOUR_ARGS_SIGNATURE
endif
+ifeq ($(feature-reallocarray), 0)
+CFLAGS += -DCOMPAT_NEED_REALLOCARRAY
+endif
+
include $(wildcard $(OUTPUT)*.d)
all: $(OUTPUT)bpftool
diff --git a/tools/bpf/bpftool/bash-completion/bpftool b/tools/bpf/bpftool/bash-completion/bpftool
index 1e1083321643..598066c40191 100644
--- a/tools/bpf/bpftool/bash-completion/bpftool
+++ b/tools/bpf/bpftool/bash-completion/bpftool
@@ -99,6 +99,35 @@ _bpftool_get_prog_tags()
command sed -n 's/.*"tag": "\(.*\)",$/\1/p' )" -- "$cur" ) )
}
+_bpftool_get_obj_map_names()
+{
+ local obj
+
+ obj=$1
+
+ maps=$(objdump -j maps -t $obj 2>/dev/null | \
+ command awk '/g . maps/ {print $NF}')
+
+ COMPREPLY+=( $( compgen -W "$maps" -- "$cur" ) )
+}
+
+_bpftool_get_obj_map_idxs()
+{
+ local obj
+
+ obj=$1
+
+ nmaps=$(objdump -j maps -t $obj 2>/dev/null | grep -c 'g . maps')
+
+ COMPREPLY+=( $( compgen -W "$(seq 0 $((nmaps - 1)))" -- "$cur" ) )
+}
+
+_sysfs_get_netdevs()
+{
+ COMPREPLY+=( $( compgen -W "$( ls /sys/class/net 2>/dev/null )" -- \
+ "$cur" ) )
+}
+
# For bpftool map update: retrieve type of the map to update.
_bpftool_map_update_map_type()
{
@@ -153,6 +182,13 @@ _bpftool()
local cur prev words objword
_init_completion || return
+ # Deal with options
+ if [[ ${words[cword]} == -* ]]; then
+ local c='--version --json --pretty --bpffs'
+ COMPREPLY=( $( compgen -W "$c" -- "$cur" ) )
+ return 0
+ fi
+
# Deal with simplest keywords
case $prev in
help|hex|opcodes|visual)
@@ -172,20 +208,23 @@ _bpftool()
;;
esac
- # Search for object and command
- local object command cmdword
- for (( cmdword=1; cmdword < ${#words[@]}-1; cmdword++ )); do
- [[ -n $object ]] && command=${words[cmdword]} && break
- [[ ${words[cmdword]} != -* ]] && object=${words[cmdword]}
+ # Remove all options so completions don't have to deal with them.
+ local i
+ for (( i=1; i < ${#words[@]}; )); do
+ if [[ ${words[i]::1} == - ]]; then
+ words=( "${words[@]:0:i}" "${words[@]:i+1}" )
+ [[ $i -le $cword ]] && cword=$(( cword - 1 ))
+ else
+ i=$(( ++i ))
+ fi
done
+ cur=${words[cword]}
+ prev=${words[cword - 1]}
+
+ local object=${words[1]} command=${words[2]}
- if [[ -z $object ]]; then
+ if [[ -z $object || $cword -eq 1 ]]; then
case $cur in
- -*)
- local c='--version --json --pretty'
- COMPREPLY=( $( compgen -W "$c" -- "$cur" ) )
- return 0
- ;;
*)
COMPREPLY=( $( compgen -W "$( bpftool help 2>&1 | \
command sed \
@@ -204,12 +243,14 @@ _bpftool()
# Completion depends on object and command in use
case $object in
prog)
- case $prev in
- id)
- _bpftool_get_prog_ids
- return 0
- ;;
- esac
+ if [[ $command != "load" ]]; then
+ case $prev in
+ id)
+ _bpftool_get_prog_ids
+ return 0
+ ;;
+ esac
+ fi
local PROG_TYPE='id pinned tag'
case $command in
@@ -252,8 +293,57 @@ _bpftool()
return 0
;;
load)
- _filedir
- return 0
+ local obj
+
+ if [[ ${#words[@]} -lt 6 ]]; then
+ _filedir
+ return 0
+ fi
+
+ obj=${words[3]}
+
+ if [[ ${words[-4]} == "map" ]]; then
+ COMPREPLY=( $( compgen -W "id pinned" -- "$cur" ) )
+ return 0
+ fi
+ if [[ ${words[-3]} == "map" ]]; then
+ if [[ ${words[-2]} == "idx" ]]; then
+ _bpftool_get_obj_map_idxs $obj
+ elif [[ ${words[-2]} == "name" ]]; then
+ _bpftool_get_obj_map_names $obj
+ fi
+ return 0
+ fi
+ if [[ ${words[-2]} == "map" ]]; then
+ COMPREPLY=( $( compgen -W "idx name" -- "$cur" ) )
+ return 0
+ fi
+
+ case $prev in
+ type)
+ COMPREPLY=( $( compgen -W "socket kprobe kretprobe classifier action tracepoint raw_tracepoint xdp perf_event cgroup/skb cgroup/sock cgroup/dev lwt_in lwt_out lwt_xmit lwt_seg6local sockops sk_skb sk_msg lirc_mode2 cgroup/bind4 cgroup/bind6 cgroup/connect4 cgroup/connect6 cgroup/sendmsg4 cgroup/sendmsg6 cgroup/post_bind4 cgroup/post_bind6" -- \
+ "$cur" ) )
+ return 0
+ ;;
+ id)
+ _bpftool_get_map_ids
+ return 0
+ ;;
+ pinned)
+ _filedir
+ return 0
+ ;;
+ dev)
+ _sysfs_get_netdevs
+ return 0
+ ;;
+ *)
+ COMPREPLY=( $( compgen -W "map" -- "$cur" ) )
+ _bpftool_once_attr 'type'
+ _bpftool_once_attr 'dev'
+ return 0
+ ;;
+ esac
;;
*)
[[ $prev == $object ]] && \
@@ -404,6 +494,10 @@ _bpftool()
_filedir
return 0
;;
+ tree)
+ _filedir
+ return 0
+ ;;
attach|detach)
local ATTACH_TYPES='ingress egress sock_create sock_ops \
device bind4 bind6 post_bind4 post_bind6 connect4 \
@@ -445,7 +539,7 @@ _bpftool()
*)
[[ $prev == $object ]] && \
COMPREPLY=( $( compgen -W 'help attach detach \
- show list' -- "$cur" ) )
+ show list tree' -- "$cur" ) )
;;
esac
;;
diff --git a/tools/bpf/bpftool/btf_dumper.c b/tools/bpf/bpftool/btf_dumper.c
new file mode 100644
index 000000000000..55bc512a1831
--- /dev/null
+++ b/tools/bpf/bpftool/btf_dumper.c
@@ -0,0 +1,251 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2018 Facebook */
+
+#include <ctype.h>
+#include <stdio.h> /* for (FILE *) used by json_writer */
+#include <string.h>
+#include <asm/byteorder.h>
+#include <linux/bitops.h>
+#include <linux/btf.h>
+#include <linux/err.h>
+
+#include "btf.h"
+#include "json_writer.h"
+#include "main.h"
+
+#define BITS_PER_BYTE_MASK (BITS_PER_BYTE - 1)
+#define BITS_PER_BYTE_MASKED(bits) ((bits) & BITS_PER_BYTE_MASK)
+#define BITS_ROUNDDOWN_BYTES(bits) ((bits) >> 3)
+#define BITS_ROUNDUP_BYTES(bits) \
+ (BITS_ROUNDDOWN_BYTES(bits) + !!BITS_PER_BYTE_MASKED(bits))
+
+static int btf_dumper_do_type(const struct btf_dumper *d, __u32 type_id,
+ __u8 bit_offset, const void *data);
+
+static void btf_dumper_ptr(const void *data, json_writer_t *jw,
+ bool is_plain_text)
+{
+ if (is_plain_text)
+ jsonw_printf(jw, "%p", *(unsigned long *)data);
+ else
+ jsonw_printf(jw, "%u", *(unsigned long *)data);
+}
+
+static int btf_dumper_modifier(const struct btf_dumper *d, __u32 type_id,
+ const void *data)
+{
+ int actual_type_id;
+
+ actual_type_id = btf__resolve_type(d->btf, type_id);
+ if (actual_type_id < 0)
+ return actual_type_id;
+
+ return btf_dumper_do_type(d, actual_type_id, 0, data);
+}
+
+static void btf_dumper_enum(const void *data, json_writer_t *jw)
+{
+ jsonw_printf(jw, "%d", *(int *)data);
+}
+
+static int btf_dumper_array(const struct btf_dumper *d, __u32 type_id,
+ const void *data)
+{
+ const struct btf_type *t = btf__type_by_id(d->btf, type_id);
+ struct btf_array *arr = (struct btf_array *)(t + 1);
+ long long elem_size;
+ int ret = 0;
+ __u32 i;
+
+ elem_size = btf__resolve_size(d->btf, arr->type);
+ if (elem_size < 0)
+ return elem_size;
+
+ jsonw_start_array(d->jw);
+ for (i = 0; i < arr->nelems; i++) {
+ ret = btf_dumper_do_type(d, arr->type, 0,
+ data + i * elem_size);
+ if (ret)
+ break;
+ }
+
+ jsonw_end_array(d->jw);
+ return ret;
+}
+
+static void btf_dumper_int_bits(__u32 int_type, __u8 bit_offset,
+ const void *data, json_writer_t *jw,
+ bool is_plain_text)
+{
+ int left_shift_bits, right_shift_bits;
+ int nr_bits = BTF_INT_BITS(int_type);
+ int total_bits_offset;
+ int bytes_to_copy;
+ int bits_to_copy;
+ __u64 print_num;
+
+ total_bits_offset = bit_offset + BTF_INT_OFFSET(int_type);
+ data += BITS_ROUNDDOWN_BYTES(total_bits_offset);
+ bit_offset = BITS_PER_BYTE_MASKED(total_bits_offset);
+ bits_to_copy = bit_offset + nr_bits;
+ bytes_to_copy = BITS_ROUNDUP_BYTES(bits_to_copy);
+
+ print_num = 0;
+ memcpy(&print_num, data, bytes_to_copy);
+#if defined(__BIG_ENDIAN_BITFIELD)
+ left_shift_bits = bit_offset;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ left_shift_bits = 64 - bits_to_copy;
+#else
+#error neither big nor little endian
+#endif
+ right_shift_bits = 64 - nr_bits;
+
+ print_num <<= left_shift_bits;
+ print_num >>= right_shift_bits;
+ if (is_plain_text)
+ jsonw_printf(jw, "0x%llx", print_num);
+ else
+ jsonw_printf(jw, "%llu", print_num);
+}
+
+static int btf_dumper_int(const struct btf_type *t, __u8 bit_offset,
+ const void *data, json_writer_t *jw,
+ bool is_plain_text)
+{
+ __u32 *int_type;
+ __u32 nr_bits;
+
+ int_type = (__u32 *)(t + 1);
+ nr_bits = BTF_INT_BITS(*int_type);
+ /* if this is bit field */
+ if (bit_offset || BTF_INT_OFFSET(*int_type) ||
+ BITS_PER_BYTE_MASKED(nr_bits)) {
+ btf_dumper_int_bits(*int_type, bit_offset, data, jw,
+ is_plain_text);
+ return 0;
+ }
+
+ switch (BTF_INT_ENCODING(*int_type)) {
+ case 0:
+ if (BTF_INT_BITS(*int_type) == 64)
+ jsonw_printf(jw, "%lu", *(__u64 *)data);
+ else if (BTF_INT_BITS(*int_type) == 32)
+ jsonw_printf(jw, "%u", *(__u32 *)data);
+ else if (BTF_INT_BITS(*int_type) == 16)
+ jsonw_printf(jw, "%hu", *(__u16 *)data);
+ else if (BTF_INT_BITS(*int_type) == 8)
+ jsonw_printf(jw, "%hhu", *(__u8 *)data);
+ else
+ btf_dumper_int_bits(*int_type, bit_offset, data, jw,
+ is_plain_text);
+ break;
+ case BTF_INT_SIGNED:
+ if (BTF_INT_BITS(*int_type) == 64)
+ jsonw_printf(jw, "%ld", *(long long *)data);
+ else if (BTF_INT_BITS(*int_type) == 32)
+ jsonw_printf(jw, "%d", *(int *)data);
+ else if (BTF_INT_BITS(*int_type) == 16)
+ jsonw_printf(jw, "%hd", *(short *)data);
+ else if (BTF_INT_BITS(*int_type) == 8)
+ jsonw_printf(jw, "%hhd", *(char *)data);
+ else
+ btf_dumper_int_bits(*int_type, bit_offset, data, jw,
+ is_plain_text);
+ break;
+ case BTF_INT_CHAR:
+ if (isprint(*(char *)data))
+ jsonw_printf(jw, "\"%c\"", *(char *)data);
+ else
+ if (is_plain_text)
+ jsonw_printf(jw, "0x%hhx", *(char *)data);
+ else
+ jsonw_printf(jw, "\"\\u00%02hhx\"",
+ *(char *)data);
+ break;
+ case BTF_INT_BOOL:
+ jsonw_bool(jw, *(int *)data);
+ break;
+ default:
+ /* shouldn't happen */
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int btf_dumper_struct(const struct btf_dumper *d, __u32 type_id,
+ const void *data)
+{
+ const struct btf_type *t;
+ struct btf_member *m;
+ const void *data_off;
+ int ret = 0;
+ int i, vlen;
+
+ t = btf__type_by_id(d->btf, type_id);
+ if (!t)
+ return -EINVAL;
+
+ vlen = BTF_INFO_VLEN(t->info);
+ jsonw_start_object(d->jw);
+ m = (struct btf_member *)(t + 1);
+
+ for (i = 0; i < vlen; i++) {
+ data_off = data + BITS_ROUNDDOWN_BYTES(m[i].offset);
+ jsonw_name(d->jw, btf__name_by_offset(d->btf, m[i].name_off));
+ ret = btf_dumper_do_type(d, m[i].type,
+ BITS_PER_BYTE_MASKED(m[i].offset),
+ data_off);
+ if (ret)
+ break;
+ }
+
+ jsonw_end_object(d->jw);
+
+ return ret;
+}
+
+static int btf_dumper_do_type(const struct btf_dumper *d, __u32 type_id,
+ __u8 bit_offset, const void *data)
+{
+ const struct btf_type *t = btf__type_by_id(d->btf, type_id);
+
+ switch (BTF_INFO_KIND(t->info)) {
+ case BTF_KIND_INT:
+ return btf_dumper_int(t, bit_offset, data, d->jw,
+ d->is_plain_text);
+ case BTF_KIND_STRUCT:
+ case BTF_KIND_UNION:
+ return btf_dumper_struct(d, type_id, data);
+ case BTF_KIND_ARRAY:
+ return btf_dumper_array(d, type_id, data);
+ case BTF_KIND_ENUM:
+ btf_dumper_enum(data, d->jw);
+ return 0;
+ case BTF_KIND_PTR:
+ btf_dumper_ptr(data, d->jw, d->is_plain_text);
+ return 0;
+ case BTF_KIND_UNKN:
+ jsonw_printf(d->jw, "(unknown)");
+ return 0;
+ case BTF_KIND_FWD:
+ /* map key or value can't be forward */
+ jsonw_printf(d->jw, "(fwd-kind-invalid)");
+ return -EINVAL;
+ case BTF_KIND_TYPEDEF:
+ case BTF_KIND_VOLATILE:
+ case BTF_KIND_CONST:
+ case BTF_KIND_RESTRICT:
+ return btf_dumper_modifier(d, type_id, data);
+ default:
+ jsonw_printf(d->jw, "(unsupported-kind");
+ return -EINVAL;
+ }
+}
+
+int btf_dumper_type(const struct btf_dumper *d, __u32 type_id,
+ const void *data)
+{
+ return btf_dumper_do_type(d, type_id, 0, data);
+}
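For reference, a minimal caller-side sketch of the new dumper interface (illustration only, not part of the patch). It assumes 'btf' was loaded with btf__new() and 'map_info'/'value' were obtained from the usual bpf_obj_get_info_by_fd() and bpf_map_lookup_elem() calls, mirroring how the map.c changes later in this series use it:

/* Illustration only -- dump one map value with BTF type information,
 * the way bpftool's map dump/lookup commands use the new interface.
 * 'btf', 'map_info' and 'value' are assumed to be set up by the caller.
 * Builds in bpftool context (needs "main.h", "btf.h", "json_writer.h").
 */
static int dump_value_with_btf(const struct btf *btf,
			       const struct bpf_map_info *map_info,
			       const void *value)
{
	json_writer_t *jw = jsonw_new(stdout);
	struct btf_dumper d = {
		.btf = btf,
		.jw = jw,
		.is_plain_text = true,
	};
	int err;

	if (!jw)
		return -1;
	jsonw_pretty(jw, true);
	err = btf_dumper_type(&d, map_info->btf_value_type_id, value);
	jsonw_destroy(&jw);
	return err;
}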
diff --git a/tools/bpf/bpftool/cgroup.c b/tools/bpf/bpftool/cgroup.c
index 16bee011e16c..ee7a9765c6b3 100644
--- a/tools/bpf/bpftool/cgroup.c
+++ b/tools/bpf/bpftool/cgroup.c
@@ -2,7 +2,12 @@
// Copyright (C) 2017 Facebook
// Author: Roman Gushchin <guro@fb.com>
+#define _XOPEN_SOURCE 500
+#include <errno.h>
#include <fcntl.h>
+#include <ftw.h>
+#include <mntent.h>
+#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/stat.h>
@@ -53,7 +58,8 @@ static enum bpf_attach_type parse_attach_type(const char *str)
}
static int show_bpf_prog(int id, const char *attach_type_str,
- const char *attach_flags_str)
+ const char *attach_flags_str,
+ int level)
{
struct bpf_prog_info info = {};
__u32 info_len = sizeof(info);
@@ -78,7 +84,8 @@ static int show_bpf_prog(int id, const char *attach_type_str,
jsonw_string_field(json_wtr, "name", info.name);
jsonw_end_object(json_wtr);
} else {
- printf("%-8u %-15s %-15s %-15s\n", info.id,
+ printf("%s%-8u %-15s %-15s %-15s\n", level ? " " : "",
+ info.id,
attach_type_str,
attach_flags_str,
info.name);
@@ -88,7 +95,20 @@ static int show_bpf_prog(int id, const char *attach_type_str,
return 0;
}
-static int show_attached_bpf_progs(int cgroup_fd, enum bpf_attach_type type)
+static int count_attached_bpf_progs(int cgroup_fd, enum bpf_attach_type type)
+{
+ __u32 prog_cnt = 0;
+ int ret;
+
+ ret = bpf_prog_query(cgroup_fd, type, 0, NULL, NULL, &prog_cnt);
+ if (ret)
+ return -1;
+
+ return prog_cnt;
+}
+
+static int show_attached_bpf_progs(int cgroup_fd, enum bpf_attach_type type,
+ int level)
{
__u32 prog_ids[1024] = {0};
char *attach_flags_str;
@@ -123,7 +143,7 @@ static int show_attached_bpf_progs(int cgroup_fd, enum bpf_attach_type type)
for (iter = 0; iter < prog_cnt; iter++)
show_bpf_prog(prog_ids[iter], attach_type_strings[type],
- attach_flags_str);
+ attach_flags_str, level);
return 0;
}
@@ -161,7 +181,7 @@ static int do_show(int argc, char **argv)
* If we were able to get the show for at least one
* attach type, let's return 0.
*/
- if (show_attached_bpf_progs(cgroup_fd, type) == 0)
+ if (show_attached_bpf_progs(cgroup_fd, type, 0) == 0)
ret = 0;
}
@@ -173,6 +193,143 @@ exit:
return ret;
}
+/*
+ * To distinguish nftw() errors from do_show_tree_fn() errors
+ * and avoid duplicating error messages, let's return -2
+ * from do_show_tree_fn() in case of error.
+ */
+#define NFTW_ERR -1
+#define SHOW_TREE_FN_ERR -2
+static int do_show_tree_fn(const char *fpath, const struct stat *sb,
+ int typeflag, struct FTW *ftw)
+{
+ enum bpf_attach_type type;
+ bool skip = true;
+ int cgroup_fd;
+
+ if (typeflag != FTW_D)
+ return 0;
+
+ cgroup_fd = open(fpath, O_RDONLY);
+ if (cgroup_fd < 0) {
+ p_err("can't open cgroup %s: %s", fpath, strerror(errno));
+ return SHOW_TREE_FN_ERR;
+ }
+
+ for (type = 0; type < __MAX_BPF_ATTACH_TYPE; type++) {
+ int count = count_attached_bpf_progs(cgroup_fd, type);
+
+ if (count < 0 && errno != EINVAL) {
+ p_err("can't query bpf programs attached to %s: %s",
+ fpath, strerror(errno));
+ close(cgroup_fd);
+ return SHOW_TREE_FN_ERR;
+ }
+ if (count > 0) {
+ skip = false;
+ break;
+ }
+ }
+
+ if (skip) {
+ close(cgroup_fd);
+ return 0;
+ }
+
+ if (json_output) {
+ jsonw_start_object(json_wtr);
+ jsonw_string_field(json_wtr, "cgroup", fpath);
+ jsonw_name(json_wtr, "programs");
+ jsonw_start_array(json_wtr);
+ } else {
+ printf("%s\n", fpath);
+ }
+
+ for (type = 0; type < __MAX_BPF_ATTACH_TYPE; type++)
+ show_attached_bpf_progs(cgroup_fd, type, ftw->level);
+
+ if (json_output) {
+ jsonw_end_array(json_wtr);
+ jsonw_end_object(json_wtr);
+ }
+
+ close(cgroup_fd);
+
+ return 0;
+}
+
+static char *find_cgroup_root(void)
+{
+ struct mntent *mnt;
+ FILE *f;
+
+ f = fopen("/proc/mounts", "r");
+ if (f == NULL)
+ return NULL;
+
+ while ((mnt = getmntent(f))) {
+ if (strcmp(mnt->mnt_type, "cgroup2") == 0) {
+ fclose(f);
+ return strdup(mnt->mnt_dir);
+ }
+ }
+
+ fclose(f);
+ return NULL;
+}
+
+static int do_show_tree(int argc, char **argv)
+{
+ char *cgroup_root;
+ int ret;
+
+ switch (argc) {
+ case 0:
+ cgroup_root = find_cgroup_root();
+ if (!cgroup_root) {
+ p_err("cgroup v2 isn't mounted");
+ return -1;
+ }
+ break;
+ case 1:
+ cgroup_root = argv[0];
+ break;
+ default:
+ p_err("too many parameters for cgroup tree");
+ return -1;
+ }
+
+
+ if (json_output)
+ jsonw_start_array(json_wtr);
+ else
+ printf("%s\n"
+ "%-8s %-15s %-15s %-15s\n",
+ "CgroupPath",
+ "ID", "AttachType", "AttachFlags", "Name");
+
+ switch (nftw(cgroup_root, do_show_tree_fn, 1024, FTW_MOUNT)) {
+ case NFTW_ERR:
+ p_err("can't iterate over %s: %s", cgroup_root,
+ strerror(errno));
+ ret = -1;
+ break;
+ case SHOW_TREE_FN_ERR:
+ ret = -1;
+ break;
+ default:
+ ret = 0;
+ }
+
+ if (json_output)
+ jsonw_end_array(json_wtr);
+
+ if (argc == 0)
+ free(cgroup_root);
+
+ return ret;
+}
+
static int do_attach(int argc, char **argv)
{
enum bpf_attach_type attach_type;
@@ -289,6 +446,7 @@ static int do_help(int argc, char **argv)
fprintf(stderr,
"Usage: %s %s { show | list } CGROUP\n"
+ " %s %s tree [CGROUP_ROOT]\n"
" %s %s attach CGROUP ATTACH_TYPE PROG [ATTACH_FLAGS]\n"
" %s %s detach CGROUP ATTACH_TYPE PROG\n"
" %s %s help\n"
@@ -298,6 +456,7 @@ static int do_help(int argc, char **argv)
" " HELP_SPEC_PROGRAM "\n"
" " HELP_SPEC_OPTIONS "\n"
"",
+ bin_name, argv[-2],
bin_name, argv[-2], bin_name, argv[-2],
bin_name, argv[-2], bin_name, argv[-2]);
@@ -307,6 +466,7 @@ static int do_help(int argc, char **argv)
static const struct cmd cmds[] = {
{ "show", do_show },
{ "list", do_show },
+ { "tree", do_show_tree },
{ "attach", do_attach },
{ "detach", do_detach },
{ "help", do_help },
diff --git a/tools/bpf/bpftool/common.c b/tools/bpf/bpftool/common.c
index 3f140eff039f..b3a0709ea7ed 100644
--- a/tools/bpf/bpftool/common.c
+++ b/tools/bpf/bpftool/common.c
@@ -31,8 +31,6 @@
* SOFTWARE.
*/
-/* Author: Jakub Kicinski <kubakici@wp.pl> */
-
#include <ctype.h>
#include <errno.h>
#include <fcntl.h>
diff --git a/tools/bpf/bpftool/main.c b/tools/bpf/bpftool/main.c
index eea7f14355f3..d15a62be6cf0 100644
--- a/tools/bpf/bpftool/main.c
+++ b/tools/bpf/bpftool/main.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2017 Netronome Systems, Inc.
+ * Copyright (C) 2017-2018 Netronome Systems, Inc.
*
* This software is dual licensed under the GNU General License Version 2,
* June 1991 as shown in the file COPYING in the top-level directory of this
@@ -31,8 +31,6 @@
* SOFTWARE.
*/
-/* Author: Jakub Kicinski <kubakici@wp.pl> */
-
#include <bfd.h>
#include <ctype.h>
#include <errno.h>
diff --git a/tools/bpf/bpftool/main.h b/tools/bpf/bpftool/main.h
index 63fdb310b9a4..238e734d75b3 100644
--- a/tools/bpf/bpftool/main.h
+++ b/tools/bpf/bpftool/main.h
@@ -31,8 +31,6 @@
* SOFTWARE.
*/
-/* Author: Jakub Kicinski <kubakici@wp.pl> */
-
#ifndef __BPF_TOOL_H
#define __BPF_TOOL_H
@@ -44,6 +42,7 @@
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/hashtable.h>
+#include <tools/libc_compat.h>
#include "json_writer.h"
@@ -52,6 +51,21 @@
#define NEXT_ARG() ({ argc--; argv++; if (argc < 0) usage(); })
#define NEXT_ARGP() ({ (*argc)--; (*argv)++; if (*argc < 0) usage(); })
#define BAD_ARG() ({ p_err("what is '%s'?", *argv); -1; })
+#define GET_ARG() ({ argc--; *argv++; })
+#define REQ_ARGS(cnt) \
+ ({ \
+ int _cnt = (cnt); \
+ bool _res; \
+ \
+ if (argc < _cnt) { \
+ p_err("'%s' needs at least %d arguments, %d found", \
+ argv[-1], _cnt, argc); \
+ _res = false; \
+ } else { \
+ _res = true; \
+ } \
+ _res; \
+ })
#define ERR_MAX_LEN 1024
@@ -61,6 +75,8 @@
"PROG := { id PROG_ID | pinned FILE | tag PROG_TAG }"
#define HELP_SPEC_OPTIONS \
"OPTIONS := { {-j|--json} [{-p|--pretty}] | {-f|--bpffs} }"
+#define HELP_SPEC_MAP \
+ "MAP := { id MAP_ID | pinned FILE }"
enum bpf_obj_type {
BPF_OBJ_UNKNOWN,
@@ -122,6 +138,7 @@ int do_cgroup(int argc, char **arg);
int do_perf(int argc, char **arg);
int prog_parse_fd(int *argc, char ***argv);
+int map_parse_fd(int *argc, char ***argv);
int map_parse_fd_and_info(int *argc, char ***argv, void *info, __u32 *info_len);
void disasm_print_insn(unsigned char *image, ssize_t len, int opcodes,
@@ -133,4 +150,19 @@ unsigned int get_page_size(void);
unsigned int get_possible_cpus(void);
const char *ifindex_to_bfd_name_ns(__u32 ifindex, __u64 ns_dev, __u64 ns_ino);
+struct btf_dumper {
+ const struct btf *btf;
+ json_writer_t *jw;
+ bool is_plain_text;
+};
+
+/* btf_dumper_type - print data along with type information
+ * @d: an instance containing context for dumping types
+ * @type_id: index in btf->types array. This points to the type to be dumped
+ * @data: pointer to the actual data, i.e. the values to be printed
+ *
+ * Returns zero on success and negative error code otherwise
+ */
+int btf_dumper_type(const struct btf_dumper *d, __u32 type_id,
+ const void *data);
#endif
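For illustration only (not part of the patch): how a bpftool command handler would typically consume its arguments with the new REQ_ARGS()/GET_ARG() helpers, modelled on the do_load() rewrite below. The do_example() handler and its two arguments are hypothetical:

/* Illustration only: a hypothetical handler using the new helpers.
 * REQ_ARGS(n) prints an error and evaluates to false when fewer than
 * n arguments remain; GET_ARG() consumes one argument and advances argv.
 */
static int do_example(int argc, char **argv)
{
	const char *objfile, *pinfile;

	if (!REQ_ARGS(2))
		return -1;
	objfile = GET_ARG();
	pinfile = GET_ARG();

	p_info("would load %s and pin it at %s", objfile, pinfile);
	return 0;
}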
diff --git a/tools/bpf/bpftool/map.c b/tools/bpf/bpftool/map.c
index 097b1a5e046b..9c8191845585 100644
--- a/tools/bpf/bpftool/map.c
+++ b/tools/bpf/bpftool/map.c
@@ -31,11 +31,10 @@
* SOFTWARE.
*/
-/* Author: Jakub Kicinski <kubakici@wp.pl> */
-
#include <assert.h>
#include <errno.h>
#include <fcntl.h>
+#include <linux/err.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
@@ -46,6 +45,8 @@
#include <bpf.h>
+#include "btf.h"
+#include "json_writer.h"
#include "main.h"
static const char * const map_type_name[] = {
@@ -95,7 +96,7 @@ static void *alloc_value(struct bpf_map_info *info)
return malloc(info->value_size);
}
-static int map_parse_fd(int *argc, char ***argv)
+int map_parse_fd(int *argc, char ***argv)
{
int fd;
@@ -150,8 +151,109 @@ int map_parse_fd_and_info(int *argc, char ***argv, void *info, __u32 *info_len)
return fd;
}
+static int do_dump_btf(const struct btf_dumper *d,
+ struct bpf_map_info *map_info, void *key,
+ void *value)
+{
+ int ret;
+
+ /* start of key-value pair */
+ jsonw_start_object(d->jw);
+
+ jsonw_name(d->jw, "key");
+
+ ret = btf_dumper_type(d, map_info->btf_key_type_id, key);
+ if (ret)
+ goto err_end_obj;
+
+ jsonw_name(d->jw, "value");
+
+ ret = btf_dumper_type(d, map_info->btf_value_type_id, value);
+
+err_end_obj:
+ /* end of key-value pair */
+ jsonw_end_object(d->jw);
+
+ return ret;
+}
+
+static int get_btf(struct bpf_map_info *map_info, struct btf **btf)
+{
+ struct bpf_btf_info btf_info = { 0 };
+ __u32 len = sizeof(btf_info);
+ __u32 last_size;
+ int btf_fd;
+ void *ptr;
+ int err;
+
+ err = 0;
+ *btf = NULL;
+ btf_fd = bpf_btf_get_fd_by_id(map_info->btf_id);
+ if (btf_fd < 0)
+ return 0;
+
+ /* we won't know btf_size until we call bpf_obj_get_info_by_fd(). so
+ * let's start with a sane default - 4KiB here - and resize it only if
+ * bpf_obj_get_info_by_fd() needs a bigger buffer.
+ */
+ btf_info.btf_size = 4096;
+ last_size = btf_info.btf_size;
+ ptr = malloc(last_size);
+ if (!ptr) {
+ err = -ENOMEM;
+ goto exit_free;
+ }
+
+ bzero(ptr, last_size);
+ btf_info.btf = ptr_to_u64(ptr);
+ err = bpf_obj_get_info_by_fd(btf_fd, &btf_info, &len);
+
+ if (!err && btf_info.btf_size > last_size) {
+ void *temp_ptr;
+
+ last_size = btf_info.btf_size;
+ temp_ptr = realloc(ptr, last_size);
+ if (!temp_ptr) {
+ err = -ENOMEM;
+ goto exit_free;
+ }
+ ptr = temp_ptr;
+ bzero(ptr, last_size);
+ btf_info.btf = ptr_to_u64(ptr);
+ err = bpf_obj_get_info_by_fd(btf_fd, &btf_info, &len);
+ }
+
+ if (err || btf_info.btf_size > last_size) {
+ err = errno;
+ goto exit_free;
+ }
+
+ *btf = btf__new((__u8 *)btf_info.btf, btf_info.btf_size, NULL);
+ if (IS_ERR(*btf)) {
+ err = PTR_ERR(*btf);
+ *btf = NULL;
+ }
+
+exit_free:
+ close(btf_fd);
+ free(ptr);
+
+ return err;
+}
+
+static json_writer_t *get_btf_writer(void)
+{
+ json_writer_t *jw = jsonw_new(stdout);
+
+ if (!jw)
+ return NULL;
+ jsonw_pretty(jw, true);
+
+ return jw;
+}
+
static void print_entry_json(struct bpf_map_info *info, unsigned char *key,
- unsigned char *value)
+ unsigned char *value, struct btf *btf)
{
jsonw_start_object(json_wtr);
@@ -160,6 +262,16 @@ static void print_entry_json(struct bpf_map_info *info, unsigned char *key,
print_hex_data_json(key, info->key_size);
jsonw_name(json_wtr, "value");
print_hex_data_json(value, info->value_size);
+ if (btf) {
+ struct btf_dumper d = {
+ .btf = btf,
+ .jw = json_wtr,
+ .is_plain_text = false,
+ };
+
+ jsonw_name(json_wtr, "formatted");
+ do_dump_btf(&d, info, key, value);
+ }
} else {
unsigned int i, n;
@@ -510,10 +622,12 @@ static int do_show(int argc, char **argv)
static int do_dump(int argc, char **argv)
{
+ struct bpf_map_info info = {};
void *key, *value, *prev_key;
unsigned int num_elems = 0;
- struct bpf_map_info info = {};
__u32 len = sizeof(info);
+ json_writer_t *btf_wtr;
+ struct btf *btf = NULL;
int err;
int fd;
@@ -539,8 +653,27 @@ static int do_dump(int argc, char **argv)
}
prev_key = NULL;
+
+ err = get_btf(&info, &btf);
+ if (err) {
+ p_err("failed to get btf");
+ goto exit_free;
+ }
+
if (json_output)
jsonw_start_array(json_wtr);
+ else
+ if (btf) {
+ btf_wtr = get_btf_writer();
+ if (!btf_wtr) {
+ p_info("failed to create json writer for btf. falling back to plain output");
+ btf__free(btf);
+ btf = NULL;
+ } else {
+ jsonw_start_array(btf_wtr);
+ }
+ }
+
while (true) {
err = bpf_map_get_next_key(fd, prev_key, key);
if (err) {
@@ -551,9 +684,19 @@ static int do_dump(int argc, char **argv)
if (!bpf_map_lookup_elem(fd, key, value)) {
if (json_output)
- print_entry_json(&info, key, value);
+ print_entry_json(&info, key, value, btf);
else
- print_entry_plain(&info, key, value);
+ if (btf) {
+ struct btf_dumper d = {
+ .btf = btf,
+ .jw = btf_wtr,
+ .is_plain_text = true,
+ };
+
+ do_dump_btf(&d, &info, key, value);
+ } else {
+ print_entry_plain(&info, key, value);
+ }
} else {
if (json_output) {
jsonw_name(json_wtr, "key");
@@ -576,14 +719,19 @@ static int do_dump(int argc, char **argv)
if (json_output)
jsonw_end_array(json_wtr);
- else
+ else if (btf) {
+ jsonw_end_array(btf_wtr);
+ jsonw_destroy(&btf_wtr);
+ } else {
printf("Found %u element%s\n", num_elems,
num_elems != 1 ? "s" : "");
+ }
exit_free:
free(key);
free(value);
close(fd);
+ btf__free(btf);
return err;
}
@@ -639,6 +787,8 @@ static int do_lookup(int argc, char **argv)
{
struct bpf_map_info info = {};
__u32 len = sizeof(info);
+ json_writer_t *btf_wtr;
+ struct btf *btf = NULL;
void *key, *value;
int err;
int fd;
@@ -663,27 +813,60 @@ static int do_lookup(int argc, char **argv)
goto exit_free;
err = bpf_map_lookup_elem(fd, key, value);
- if (!err) {
- if (json_output)
- print_entry_json(&info, key, value);
- else
+ if (err) {
+ if (errno == ENOENT) {
+ if (json_output) {
+ jsonw_null(json_wtr);
+ } else {
+ printf("key:\n");
+ fprint_hex(stdout, key, info.key_size, " ");
+ printf("\n\nNot found\n");
+ }
+ } else {
+ p_err("lookup failed: %s", strerror(errno));
+ }
+
+ goto exit_free;
+ }
+
+ /* here means bpf_map_lookup_elem() succeeded */
+ err = get_btf(&info, &btf);
+ if (err) {
+ p_err("failed to get btf");
+ goto exit_free;
+ }
+
+ if (json_output) {
+ print_entry_json(&info, key, value, btf);
+ } else if (btf) {
+ /* if here json_wtr wouldn't have been initialised,
+ * so let's create separate writer for btf
+ */
+ btf_wtr = get_btf_writer();
+ if (!btf_wtr) {
+ p_info("failed to create json writer for btf. falling back to plain output");
+ btf__free(btf);
+ btf = NULL;
print_entry_plain(&info, key, value);
- } else if (errno == ENOENT) {
- if (json_output) {
- jsonw_null(json_wtr);
} else {
- printf("key:\n");
- fprint_hex(stdout, key, info.key_size, " ");
- printf("\n\nNot found\n");
+ struct btf_dumper d = {
+ .btf = btf,
+ .jw = btf_wtr,
+ .is_plain_text = true,
+ };
+
+ do_dump_btf(&d, &info, key, value);
+ jsonw_destroy(&btf_wtr);
}
} else {
- p_err("lookup failed: %s", strerror(errno));
+ print_entry_plain(&info, key, value);
}
exit_free:
free(key);
free(value);
close(fd);
+ btf__free(btf);
return err;
}
@@ -826,7 +1009,7 @@ static int do_help(int argc, char **argv)
" %s %s event_pipe MAP [cpu N index M]\n"
" %s %s help\n"
"\n"
- " MAP := { id MAP_ID | pinned FILE }\n"
+ " " HELP_SPEC_MAP "\n"
" DATA := { [hex] BYTES }\n"
" " HELP_SPEC_PROGRAM "\n"
" VALUE := { DATA | MAP | PROG }\n"
diff --git a/tools/bpf/bpftool/prog.c b/tools/bpf/bpftool/prog.c
index 959aa53ab678..dce960d22106 100644
--- a/tools/bpf/bpftool/prog.c
+++ b/tools/bpf/bpftool/prog.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2017 Netronome Systems, Inc.
+ * Copyright (C) 2017-2018 Netronome Systems, Inc.
*
* This software is dual licensed under the GNU General License Version 2,
* June 1991 as shown in the file COPYING in the top-level directory of this
@@ -31,8 +31,7 @@
* SOFTWARE.
*/
-/* Author: Jakub Kicinski <kubakici@wp.pl> */
-
+#define _GNU_SOURCE
#include <errno.h>
#include <fcntl.h>
#include <stdarg.h>
@@ -41,9 +40,12 @@
#include <string.h>
#include <time.h>
#include <unistd.h>
+#include <net/if.h>
#include <sys/types.h>
#include <sys/stat.h>
+#include <linux/err.h>
+
#include <bpf.h>
#include <libbpf.h>
@@ -681,31 +683,247 @@ static int do_pin(int argc, char **argv)
return err;
}
+struct map_replace {
+ int idx;
+ int fd;
+ char *name;
+};
+
+int map_replace_compar(const void *p1, const void *p2)
+{
+ const struct map_replace *a = p1, *b = p2;
+
+ return a->idx - b->idx;
+}
+
static int do_load(int argc, char **argv)
{
+ enum bpf_attach_type expected_attach_type;
+ struct bpf_object_open_attr attr = {
+ .prog_type = BPF_PROG_TYPE_UNSPEC,
+ };
+ struct map_replace *map_replace = NULL;
+ unsigned int old_map_fds = 0;
+ struct bpf_program *prog;
struct bpf_object *obj;
- int prog_fd;
-
- if (argc != 2)
- usage();
+ struct bpf_map *map;
+ const char *pinfile;
+ unsigned int i, j;
+ __u32 ifindex = 0;
+ int idx, err;
- if (bpf_prog_load(argv[0], BPF_PROG_TYPE_UNSPEC, &obj, &prog_fd)) {
- p_err("failed to load program");
+ if (!REQ_ARGS(2))
return -1;
+ attr.file = GET_ARG();
+ pinfile = GET_ARG();
+
+ while (argc) {
+ if (is_prefix(*argv, "type")) {
+ char *type;
+
+ NEXT_ARG();
+
+ if (attr.prog_type != BPF_PROG_TYPE_UNSPEC) {
+ p_err("program type already specified");
+ goto err_free_reuse_maps;
+ }
+ if (!REQ_ARGS(1))
+ goto err_free_reuse_maps;
+
+ /* Put a '/' at the end of type to appease libbpf */
+ type = malloc(strlen(*argv) + 2);
+ if (!type) {
+ p_err("mem alloc failed");
+ goto err_free_reuse_maps;
+ }
+ *type = 0;
+ strcat(type, *argv);
+ strcat(type, "/");
+
+ err = libbpf_prog_type_by_name(type, &attr.prog_type,
+ &expected_attach_type);
+ free(type);
+ if (err < 0) {
+ p_err("unknown program type '%s'", *argv);
+ goto err_free_reuse_maps;
+ }
+ NEXT_ARG();
+ } else if (is_prefix(*argv, "map")) {
+ char *endptr, *name;
+ int fd;
+
+ NEXT_ARG();
+
+ if (!REQ_ARGS(4))
+ goto err_free_reuse_maps;
+
+ if (is_prefix(*argv, "idx")) {
+ NEXT_ARG();
+
+ idx = strtoul(*argv, &endptr, 0);
+ if (*endptr) {
+ p_err("can't parse %s as IDX", *argv);
+ goto err_free_reuse_maps;
+ }
+ name = NULL;
+ } else if (is_prefix(*argv, "name")) {
+ NEXT_ARG();
+
+ name = *argv;
+ idx = -1;
+ } else {
+ p_err("expected 'idx' or 'name', got: '%s'?",
+ *argv);
+ goto err_free_reuse_maps;
+ }
+ NEXT_ARG();
+
+ fd = map_parse_fd(&argc, &argv);
+ if (fd < 0)
+ goto err_free_reuse_maps;
+
+ map_replace = reallocarray(map_replace, old_map_fds + 1,
+ sizeof(*map_replace));
+ if (!map_replace) {
+ p_err("mem alloc failed");
+ goto err_free_reuse_maps;
+ }
+ map_replace[old_map_fds].idx = idx;
+ map_replace[old_map_fds].name = name;
+ map_replace[old_map_fds].fd = fd;
+ old_map_fds++;
+ } else if (is_prefix(*argv, "dev")) {
+ NEXT_ARG();
+
+ if (ifindex) {
+ p_err("offload device already specified");
+ goto err_free_reuse_maps;
+ }
+ if (!REQ_ARGS(1))
+ goto err_free_reuse_maps;
+
+ ifindex = if_nametoindex(*argv);
+ if (!ifindex) {
+ p_err("unrecognized netdevice '%s': %s",
+ *argv, strerror(errno));
+ goto err_free_reuse_maps;
+ }
+ NEXT_ARG();
+ } else {
+ p_err("expected no more arguments, 'type', 'map' or 'dev', got: '%s'?",
+ *argv);
+ goto err_free_reuse_maps;
+ }
+ }
+
+ obj = bpf_object__open_xattr(&attr);
+ if (IS_ERR_OR_NULL(obj)) {
+ p_err("failed to open object file");
+ goto err_free_reuse_maps;
+ }
+
+ prog = bpf_program__next(NULL, obj);
+ if (!prog) {
+ p_err("object file doesn't contain any bpf program");
+ goto err_close_obj;
+ }
+
+ bpf_program__set_ifindex(prog, ifindex);
+ if (attr.prog_type == BPF_PROG_TYPE_UNSPEC) {
+ const char *sec_name = bpf_program__title(prog, false);
+
+ err = libbpf_prog_type_by_name(sec_name, &attr.prog_type,
+ &expected_attach_type);
+ if (err < 0) {
+ p_err("failed to guess program type based on section name %s\n",
+ sec_name);
+ goto err_close_obj;
+ }
+ }
+ bpf_program__set_type(prog, attr.prog_type);
+ bpf_program__set_expected_attach_type(prog, expected_attach_type);
+
+ qsort(map_replace, old_map_fds, sizeof(*map_replace),
+ map_replace_compar);
+
+ /* After the sort maps by name will be first on the list, because they
+ * have idx == -1. Resolve them.
+ */
+ j = 0;
+ while (j < old_map_fds && map_replace[j].name) {
+ i = 0;
+ bpf_map__for_each(map, obj) {
+ if (!strcmp(bpf_map__name(map), map_replace[j].name)) {
+ map_replace[j].idx = i;
+ break;
+ }
+ i++;
+ }
+ if (map_replace[j].idx == -1) {
+ p_err("unable to find map '%s'", map_replace[j].name);
+ goto err_close_obj;
+ }
+ j++;
+ }
+ /* Resort if any names were resolved */
+ if (j)
+ qsort(map_replace, old_map_fds, sizeof(*map_replace),
+ map_replace_compar);
+
+ /* Set ifindex and name reuse */
+ j = 0;
+ idx = 0;
+ bpf_map__for_each(map, obj) {
+ if (!bpf_map__is_offload_neutral(map))
+ bpf_map__set_ifindex(map, ifindex);
+
+ if (j < old_map_fds && idx == map_replace[j].idx) {
+ err = bpf_map__reuse_fd(map, map_replace[j++].fd);
+ if (err) {
+ p_err("unable to set up map reuse: %d", err);
+ goto err_close_obj;
+ }
+
+ /* Next reuse wants to apply to the same map */
+ if (j < old_map_fds && map_replace[j].idx == idx) {
+ p_err("replacement for map idx %d specified more than once",
+ idx);
+ goto err_close_obj;
+ }
+ }
+
+ idx++;
+ }
+ if (j < old_map_fds) {
+ p_err("map idx '%d' not used", map_replace[j].idx);
+ goto err_close_obj;
+ }
+
+ err = bpf_object__load(obj);
+ if (err) {
+ p_err("failed to load object file");
+ goto err_close_obj;
}
- if (do_pin_fd(prog_fd, argv[1]))
+ if (do_pin_fd(bpf_program__fd(prog), pinfile))
goto err_close_obj;
if (json_output)
jsonw_null(json_wtr);
bpf_object__close(obj);
+ for (i = 0; i < old_map_fds; i++)
+ close(map_replace[i].fd);
+ free(map_replace);
return 0;
err_close_obj:
bpf_object__close(obj);
+err_free_reuse_maps:
+ for (i = 0; i < old_map_fds; i++)
+ close(map_replace[i].fd);
+ free(map_replace);
return -1;
}
@@ -721,10 +939,19 @@ static int do_help(int argc, char **argv)
" %s %s dump xlated PROG [{ file FILE | opcodes | visual }]\n"
" %s %s dump jited PROG [{ file FILE | opcodes }]\n"
" %s %s pin PROG FILE\n"
- " %s %s load OBJ FILE\n"
+ " %s %s load OBJ FILE [type TYPE] [dev NAME] \\\n"
+ " [map { idx IDX | name NAME } MAP]\n"
" %s %s help\n"
"\n"
+ " " HELP_SPEC_MAP "\n"
" " HELP_SPEC_PROGRAM "\n"
+ " TYPE := { socket | kprobe | kretprobe | classifier | action |\n"
+ " tracepoint | raw_tracepoint | xdp | perf_event | cgroup/skb |\n"
+ " cgroup/sock | cgroup/dev | lwt_in | lwt_out | lwt_xmit |\n"
+ " lwt_seg6local | sockops | sk_skb | sk_msg | lirc_mode2 |\n"
+ " cgroup/bind4 | cgroup/bind6 | cgroup/post_bind4 |\n"
+ " cgroup/post_bind6 | cgroup/connect4 | cgroup/connect6 |\n"
+ " cgroup/sendmsg4 | cgroup/sendmsg6 }\n"
" " HELP_SPEC_OPTIONS "\n"
"",
bin_name, argv[-2], bin_name, argv[-2], bin_name, argv[-2],
diff --git a/tools/bpf/bpftool/xlated_dumper.c b/tools/bpf/bpftool/xlated_dumper.c
index b97f1da60dd1..3284759df98a 100644
--- a/tools/bpf/bpftool/xlated_dumper.c
+++ b/tools/bpf/bpftool/xlated_dumper.c
@@ -35,6 +35,7 @@
* POSSIBILITY OF SUCH DAMAGE.
*/
+#define _GNU_SOURCE
#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>
@@ -66,9 +67,8 @@ void kernel_syms_load(struct dump_data *dd)
while (!feof(fp)) {
if (!fgets(buff, sizeof(buff), fp))
break;
- tmp = realloc(dd->sym_mapping,
- (dd->sym_count + 1) *
- sizeof(*dd->sym_mapping));
+ tmp = reallocarray(dd->sym_mapping, dd->sym_count + 1,
+ sizeof(*dd->sym_mapping));
if (!tmp) {
out:
free(dd->sym_mapping);
diff --git a/tools/build/Makefile.feature b/tools/build/Makefile.feature
index 5b6dda3b1ca8..f216b2f5c3d7 100644
--- a/tools/build/Makefile.feature
+++ b/tools/build/Makefile.feature
@@ -57,6 +57,7 @@ FEATURE_TESTS_BASIC := \
libunwind-aarch64 \
pthread-attr-setaffinity-np \
pthread-barrier \
+ reallocarray \
stackprotector-all \
timerfd \
libdw-dwarf-unwind \
diff --git a/tools/build/feature/Makefile b/tools/build/feature/Makefile
index dac9563b5470..0516259be70f 100644
--- a/tools/build/feature/Makefile
+++ b/tools/build/feature/Makefile
@@ -14,6 +14,7 @@ FILES= \
test-libaudit.bin \
test-libbfd.bin \
test-disassembler-four-args.bin \
+ test-reallocarray.bin \
test-liberty.bin \
test-liberty-z.bin \
test-cplus-demangle.bin \
@@ -204,6 +205,9 @@ $(OUTPUT)test-libbfd.bin:
$(OUTPUT)test-disassembler-four-args.bin:
$(BUILD) -DPACKAGE='"perf"' -lbfd -lopcodes
+$(OUTPUT)test-reallocarray.bin:
+ $(BUILD)
+
$(OUTPUT)test-liberty.bin:
$(CC) $(CFLAGS) -Wall -Werror -o $@ test-libbfd.c -DPACKAGE='"perf"' $(LDFLAGS) -lbfd -ldl -liberty
diff --git a/tools/build/feature/test-reallocarray.c b/tools/build/feature/test-reallocarray.c
new file mode 100644
index 000000000000..8170de35150d
--- /dev/null
+++ b/tools/build/feature/test-reallocarray.c
@@ -0,0 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
+#define _GNU_SOURCE
+#include <stdlib.h>
+
+int main(void)
+{
+ return !!reallocarray(NULL, 1, 1);
+}
diff --git a/tools/include/linux/compiler-gcc.h b/tools/include/linux/compiler-gcc.h
index 70fe61295733..0d35f18006a1 100644
--- a/tools/include/linux/compiler-gcc.h
+++ b/tools/include/linux/compiler-gcc.h
@@ -36,3 +36,7 @@
#endif
#define __printf(a, b) __attribute__((format(printf, a, b)))
#define __scanf(a, b) __attribute__((format(scanf, a, b)))
+
+#if GCC_VERSION >= 50100
+#define COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW 1
+#endif
diff --git a/tools/include/linux/overflow.h b/tools/include/linux/overflow.h
new file mode 100644
index 000000000000..8712ff70995f
--- /dev/null
+++ b/tools/include/linux/overflow.h
@@ -0,0 +1,278 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+#ifndef __LINUX_OVERFLOW_H
+#define __LINUX_OVERFLOW_H
+
+#include <linux/compiler.h>
+
+/*
+ * In the fallback code below, we need to compute the minimum and
+ * maximum values representable in a given type. These macros may also
+ * be useful elsewhere, so we provide them outside the
+ * COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW block.
+ *
+ * It would seem more obvious to do something like
+ *
+ * #define type_min(T) (T)(is_signed_type(T) ? (T)1 << (8*sizeof(T)-1) : 0)
+ * #define type_max(T) (T)(is_signed_type(T) ? ((T)1 << (8*sizeof(T)-1)) - 1 : ~(T)0)
+ *
+ * Unfortunately, the middle expressions, strictly speaking, have
+ * undefined behaviour, and at least some versions of gcc warn about
+ * the type_max expression (but not if -fsanitize=undefined is in
+ * effect; in that case, the warning is deferred to runtime...).
+ *
+ * The slightly excessive casting in type_min is to make sure the
+ * macros also produce sensible values for the exotic type _Bool. [The
+ * overflow checkers only almost work for _Bool, but that's
+ * a-feature-not-a-bug, since people shouldn't be doing arithmetic on
+ * _Bools. Besides, the gcc builtins don't allow _Bool* as third
+ * argument.]
+ *
+ * Idea stolen from
+ * https://mail-index.netbsd.org/tech-misc/2007/02/05/0000.html -
+ * credit to Christian Biere.
+ */
+#define is_signed_type(type) (((type)(-1)) < (type)1)
+#define __type_half_max(type) ((type)1 << (8*sizeof(type) - 1 - is_signed_type(type)))
+#define type_max(T) ((T)((__type_half_max(T) - 1) + __type_half_max(T)))
+#define type_min(T) ((T)((T)-type_max(T)-(T)1))
+
+
+#ifdef COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW
+/*
+ * For simplicity and code hygiene, the fallback code below insists on
+ * a, b and *d having the same type (similar to the min() and max()
+ * macros), whereas gcc's type-generic overflow checkers accept
+ * different types. Hence we don't just make check_add_overflow an
+ * alias for __builtin_add_overflow, but add type checks similar to
+ * below.
+ */
+#define check_add_overflow(a, b, d) ({ \
+ typeof(a) __a = (a); \
+ typeof(b) __b = (b); \
+ typeof(d) __d = (d); \
+ (void) (&__a == &__b); \
+ (void) (&__a == __d); \
+ __builtin_add_overflow(__a, __b, __d); \
+})
+
+#define check_sub_overflow(a, b, d) ({ \
+ typeof(a) __a = (a); \
+ typeof(b) __b = (b); \
+ typeof(d) __d = (d); \
+ (void) (&__a == &__b); \
+ (void) (&__a == __d); \
+ __builtin_sub_overflow(__a, __b, __d); \
+})
+
+#define check_mul_overflow(a, b, d) ({ \
+ typeof(a) __a = (a); \
+ typeof(b) __b = (b); \
+ typeof(d) __d = (d); \
+ (void) (&__a == &__b); \
+ (void) (&__a == __d); \
+ __builtin_mul_overflow(__a, __b, __d); \
+})
+
+#else
+
+
+/* Checking for unsigned overflow is relatively easy without causing UB. */
+#define __unsigned_add_overflow(a, b, d) ({ \
+ typeof(a) __a = (a); \
+ typeof(b) __b = (b); \
+ typeof(d) __d = (d); \
+ (void) (&__a == &__b); \
+ (void) (&__a == __d); \
+ *__d = __a + __b; \
+ *__d < __a; \
+})
+#define __unsigned_sub_overflow(a, b, d) ({ \
+ typeof(a) __a = (a); \
+ typeof(b) __b = (b); \
+ typeof(d) __d = (d); \
+ (void) (&__a == &__b); \
+ (void) (&__a == __d); \
+ *__d = __a - __b; \
+ __a < __b; \
+})
+/*
+ * If one of a or b is a compile-time constant, this avoids a division.
+ */
+#define __unsigned_mul_overflow(a, b, d) ({ \
+ typeof(a) __a = (a); \
+ typeof(b) __b = (b); \
+ typeof(d) __d = (d); \
+ (void) (&__a == &__b); \
+ (void) (&__a == __d); \
+ *__d = __a * __b; \
+ __builtin_constant_p(__b) ? \
+ __b > 0 && __a > type_max(typeof(__a)) / __b : \
+ __a > 0 && __b > type_max(typeof(__b)) / __a; \
+})
+
+/*
+ * For signed types, detecting overflow is much harder, especially if
+ * we want to avoid UB. But the interface of these macros is such that
+ * we must provide a result in *d, and in fact we must produce the
+ * result promised by gcc's builtins, which is simply the possibly
+ * wrapped-around value. Fortunately, we can just formally do the
+ * operations in the widest relevant unsigned type (u64) and then
+ * truncate the result - gcc is smart enough to generate the same code
+ * with and without the (u64) casts.
+ */
+
+/*
+ * Adding two signed integers can overflow only if they have the same
+ * sign, and overflow has happened iff the result has the opposite
+ * sign.
+ */
+#define __signed_add_overflow(a, b, d) ({ \
+ typeof(a) __a = (a); \
+ typeof(b) __b = (b); \
+ typeof(d) __d = (d); \
+ (void) (&__a == &__b); \
+ (void) (&__a == __d); \
+ *__d = (u64)__a + (u64)__b; \
+ (((~(__a ^ __b)) & (*__d ^ __a)) \
+ & type_min(typeof(__a))) != 0; \
+})
+
+/*
+ * Subtraction is similar, except that overflow can now happen only
+ * when the signs are opposite. In this case, overflow has happened if
+ * the result has the opposite sign of a.
+ */
+#define __signed_sub_overflow(a, b, d) ({ \
+ typeof(a) __a = (a); \
+ typeof(b) __b = (b); \
+ typeof(d) __d = (d); \
+ (void) (&__a == &__b); \
+ (void) (&__a == __d); \
+ *__d = (u64)__a - (u64)__b; \
+ ((((__a ^ __b)) & (*__d ^ __a)) \
+ & type_min(typeof(__a))) != 0; \
+})
+
+/*
+ * Signed multiplication is rather hard. gcc always follows C99, so
+ * division is truncated towards 0. This means that we can write the
+ * overflow check like this:
+ *
+ * (a > 0 && (b > MAX/a || b < MIN/a)) ||
+ * (a < -1 && (b > MIN/a || b < MAX/a) ||
+ * (a == -1 && b == MIN)
+ *
+ * The redundant casts of -1 are to silence an annoying -Wtype-limits
+ * (included in -Wextra) warning: When the type is u8 or u16, the
+ * __b_c_e in check_mul_overflow obviously selects
+ * __unsigned_mul_overflow, but unfortunately gcc still parses this
+ * code and warns about the limited range of __b.
+ */
+
+#define __signed_mul_overflow(a, b, d) ({ \
+ typeof(a) __a = (a); \
+ typeof(b) __b = (b); \
+ typeof(d) __d = (d); \
+ typeof(a) __tmax = type_max(typeof(a)); \
+ typeof(a) __tmin = type_min(typeof(a)); \
+ (void) (&__a == &__b); \
+ (void) (&__a == __d); \
+ *__d = (u64)__a * (u64)__b; \
+ (__b > 0 && (__a > __tmax/__b || __a < __tmin/__b)) || \
+ (__b < (typeof(__b))-1 && (__a > __tmin/__b || __a < __tmax/__b)) || \
+ (__b == (typeof(__b))-1 && __a == __tmin); \
+})
+
+
+#define check_add_overflow(a, b, d) \
+ __builtin_choose_expr(is_signed_type(typeof(a)), \
+ __signed_add_overflow(a, b, d), \
+ __unsigned_add_overflow(a, b, d))
+
+#define check_sub_overflow(a, b, d) \
+ __builtin_choose_expr(is_signed_type(typeof(a)), \
+ __signed_sub_overflow(a, b, d), \
+ __unsigned_sub_overflow(a, b, d))
+
+#define check_mul_overflow(a, b, d) \
+ __builtin_choose_expr(is_signed_type(typeof(a)), \
+ __signed_mul_overflow(a, b, d), \
+ __unsigned_mul_overflow(a, b, d))
+
+
+#endif /* COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW */
+
+/**
+ * array_size() - Calculate size of 2-dimensional array.
+ *
+ * @a: dimension one
+ * @b: dimension two
+ *
+ * Calculates size of 2-dimensional array: @a * @b.
+ *
+ * Returns: number of bytes needed to represent the array or SIZE_MAX on
+ * overflow.
+ */
+static inline __must_check size_t array_size(size_t a, size_t b)
+{
+ size_t bytes;
+
+ if (check_mul_overflow(a, b, &bytes))
+ return SIZE_MAX;
+
+ return bytes;
+}
+
+/**
+ * array3_size() - Calculate size of 3-dimensional array.
+ *
+ * @a: dimension one
+ * @b: dimension two
+ * @c: dimension three
+ *
+ * Calculates size of 3-dimensional array: @a * @b * @c.
+ *
+ * Returns: number of bytes needed to represent the array or SIZE_MAX on
+ * overflow.
+ */
+static inline __must_check size_t array3_size(size_t a, size_t b, size_t c)
+{
+ size_t bytes;
+
+ if (check_mul_overflow(a, b, &bytes))
+ return SIZE_MAX;
+ if (check_mul_overflow(bytes, c, &bytes))
+ return SIZE_MAX;
+
+ return bytes;
+}
+
+static inline __must_check size_t __ab_c_size(size_t n, size_t size, size_t c)
+{
+ size_t bytes;
+
+ if (check_mul_overflow(n, size, &bytes))
+ return SIZE_MAX;
+ if (check_add_overflow(bytes, c, &bytes))
+ return SIZE_MAX;
+
+ return bytes;
+}
+
+/**
+ * struct_size() - Calculate size of structure with trailing array.
+ * @p: Pointer to the structure.
+ * @member: Name of the array member.
+ * @n: Number of elements in the array.
+ *
+ * Calculates size of memory needed for structure @p followed by an
+ * array of @n @member elements.
+ *
+ * Return: number of bytes needed or SIZE_MAX on overflow.
+ */
+#define struct_size(p, member, n) \
+ __ab_c_size(n, \
+ sizeof(*(p)->member) + __must_be_array((p)->member),\
+ sizeof(*(p)))
+
+#endif /* __LINUX_OVERFLOW_H */
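A minimal usage sketch for the helpers above (illustrative only; alloc_array is a made-up name, and the snippet assumes tools/include is on the include path so that <linux/overflow.h> resolves to this header):

#include <stdlib.h>
#include <linux/overflow.h>

/* Allocate room for n elements of the given size, refusing the
 * allocation if n * size would wrap around. array_size() and
 * struct_size() above package the same check for the common cases. */
static void *alloc_array(size_t n, size_t size)
{
	size_t bytes;

	if (check_mul_overflow(n, size, &bytes))
		return NULL;	/* multiplication overflowed */

	return malloc(bytes);
}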
diff --git a/tools/include/tools/libc_compat.h b/tools/include/tools/libc_compat.h
new file mode 100644
index 000000000000..664ced8cb1b0
--- /dev/null
+++ b/tools/include/tools/libc_compat.h
@@ -0,0 +1,20 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* Copyright (C) 2018 Netronome Systems, Inc. */
+
+#ifndef __TOOLS_LIBC_COMPAT_H
+#define __TOOLS_LIBC_COMPAT_H
+
+#include <stdlib.h>
+#include <linux/overflow.h>
+
+#ifdef COMPAT_NEED_REALLOCARRAY
+static inline void *reallocarray(void *ptr, size_t nmemb, size_t size)
+{
+ size_t bytes;
+
+ if (unlikely(check_mul_overflow(nmemb, size, &bytes)))
+ return NULL;
+ return realloc(ptr, bytes);
+}
+#endif
+#endif
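A sketch of the growth pattern this shim supports (names are illustrative; _GNU_SOURCE covers the libc declaration when COMPAT_NEED_REALLOCARRAY is not set):

#define _GNU_SOURCE
#include <stdlib.h>
#include <tools/libc_compat.h>

struct item { int fd; };

/* Grow an array by one element. On failure the original array stays
 * valid, and an nmemb * size overflow is reported as NULL instead of
 * silently truncating the allocation. */
static struct item *grow_items(struct item *items, size_t *nr)
{
	struct item *tmp;

	tmp = reallocarray(items, *nr + 1, sizeof(*items));
	if (!tmp)
		return NULL;

	(*nr)++;
	return tmp;
}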
diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h
index 59b19b6a40d7..870113916cac 100644
--- a/tools/include/uapi/linux/bpf.h
+++ b/tools/include/uapi/linux/bpf.h
@@ -1826,7 +1826,7 @@ union bpf_attr {
* A non-negative value equal to or less than *size* on success,
* or a negative error in case of failure.
*
- * int skb_load_bytes_relative(const struct sk_buff *skb, u32 offset, void *to, u32 len, u32 start_header)
+ * int bpf_skb_load_bytes_relative(const struct sk_buff *skb, u32 offset, void *to, u32 len, u32 start_header)
* Description
* This helper is similar to **bpf_skb_load_bytes**\ () in that
* it provides an easy way to load *len* bytes from *offset*
@@ -1857,7 +1857,8 @@ union bpf_attr {
* is resolved), the nexthop address is returned in ipv4_dst
* or ipv6_dst based on family, smac is set to mac address of
* egress device, dmac is set to nexthop mac address, rt_metric
- * is set to metric from route (IPv4/IPv6 only).
+ * is set to metric from route (IPv4/IPv6 only), and ifindex
+ * is set to the device index of the nexthop from the FIB lookup.
*
* *plen* argument is the size of the passed in struct.
* *flags* argument can be a combination of one or more of the
@@ -1873,9 +1874,10 @@ union bpf_attr {
* *ctx* is either **struct xdp_md** for XDP programs or
 * **struct sk_buff** for tc cls_act programs.
* Return
- * Egress device index on success, 0 if packet needs to continue
- * up the stack for further processing or a negative error in case
- * of failure.
+ * * < 0 if any input argument is invalid
+ * * 0 on success (packet is forwarded, nexthop neighbor exists)
+ * * > 0 one of **BPF_FIB_LKUP_RET_** codes explaining why the
+ * packet is not forwarded or needs assist from full stack
*
* int bpf_sock_hash_update(struct bpf_sock_ops_kern *skops, struct bpf_map *map, void *key, u64 flags)
* Description
@@ -2031,7 +2033,6 @@ union bpf_attr {
 * This helper is only available if the kernel was compiled with
* the **CONFIG_BPF_LIRC_MODE2** configuration option set to
* "**y**".
- *
* Return
* 0
*
@@ -2051,7 +2052,6 @@ union bpf_attr {
 * This helper is only available if the kernel was compiled with
* the **CONFIG_BPF_LIRC_MODE2** configuration option set to
* "**y**".
- *
* Return
* 0
*
@@ -2555,6 +2555,9 @@ enum {
* Arg1: old_state
* Arg2: new_state
*/
+ BPF_SOCK_OPS_TCP_LISTEN_CB, /* Called on listen(2), right after
+ * socket transition to LISTEN state.
+ */
};
/* List of TCP states. There is a build check in net/ipv4/tcp.c to detect
@@ -2612,6 +2615,18 @@ struct bpf_raw_tracepoint_args {
#define BPF_FIB_LOOKUP_DIRECT BIT(0)
#define BPF_FIB_LOOKUP_OUTPUT BIT(1)
+enum {
+ BPF_FIB_LKUP_RET_SUCCESS, /* lookup successful */
+ BPF_FIB_LKUP_RET_BLACKHOLE, /* dest is blackholed; can be dropped */
+ BPF_FIB_LKUP_RET_UNREACHABLE, /* dest is unreachable; can be dropped */
+ BPF_FIB_LKUP_RET_PROHIBIT, /* dest not allowed; can be dropped */
+ BPF_FIB_LKUP_RET_NOT_FWDED, /* packet is not forwarded */
+ BPF_FIB_LKUP_RET_FWD_DISABLED, /* fwding is not enabled on ingress */
+ BPF_FIB_LKUP_RET_UNSUPP_LWT, /* fwd requires encapsulation */
+ BPF_FIB_LKUP_RET_NO_NEIGH, /* no neighbor entry for nh */
+ BPF_FIB_LKUP_RET_FRAG_NEEDED, /* fragmentation required to fwd */
+};
+
struct bpf_fib_lookup {
/* input: network family for lookup (AF_INET, AF_INET6)
* output: network family of egress nexthop
@@ -2625,7 +2640,11 @@ struct bpf_fib_lookup {
/* total length of packet from network header - used for MTU check */
__u16 tot_len;
- __u32 ifindex; /* L3 device index for lookup */
+
+ /* input: L3 device index for lookup
+ * output: device index from FIB lookup
+ */
+ __u32 ifindex;
union {
/* inputs to lookup */
diff --git a/tools/lib/bpf/Build b/tools/lib/bpf/Build
index 6070e655042d..13a861135127 100644
--- a/tools/lib/bpf/Build
+++ b/tools/lib/bpf/Build
@@ -1 +1 @@
-libbpf-y := libbpf.o bpf.o nlattr.o btf.o
+libbpf-y := libbpf.o bpf.o nlattr.o btf.o libbpf_errno.o
diff --git a/tools/lib/bpf/Makefile b/tools/lib/bpf/Makefile
index 5390e7725e43..d49902e818b5 100644
--- a/tools/lib/bpf/Makefile
+++ b/tools/lib/bpf/Makefile
@@ -66,7 +66,7 @@ ifndef VERBOSE
endif
FEATURE_USER = .libbpf
-FEATURE_TESTS = libelf libelf-getphdrnum libelf-mmap bpf
+FEATURE_TESTS = libelf libelf-mmap bpf reallocarray
FEATURE_DISPLAY = libelf bpf
INCLUDES = -I. -I$(srctree)/tools/include -I$(srctree)/tools/arch/$(ARCH)/include/uapi -I$(srctree)/tools/include/uapi -I$(srctree)/tools/perf
@@ -116,8 +116,8 @@ ifeq ($(feature-libelf-mmap), 1)
override CFLAGS += -DHAVE_LIBELF_MMAP_SUPPORT
endif
-ifeq ($(feature-libelf-getphdrnum), 1)
- override CFLAGS += -DHAVE_ELF_GETPHDRNUM_SUPPORT
+ifeq ($(feature-reallocarray), 0)
+ override CFLAGS += -DCOMPAT_NEED_REALLOCARRAY
endif
# Append required CFLAGS
diff --git a/tools/lib/bpf/btf.c b/tools/lib/bpf/btf.c
index 2d270c560df3..1622a309f169 100644
--- a/tools/lib/bpf/btf.c
+++ b/tools/lib/bpf/btf.c
@@ -16,6 +16,11 @@
#define BTF_MAX_NR_TYPES 65535
+#define IS_MODIFIER(k) (((k) == BTF_KIND_TYPEDEF) || \
+ ((k) == BTF_KIND_VOLATILE) || \
+ ((k) == BTF_KIND_CONST) || \
+ ((k) == BTF_KIND_RESTRICT))
+
static struct btf_type btf_void;
struct btf {
@@ -269,6 +274,26 @@ __s64 btf__resolve_size(const struct btf *btf, __u32 type_id)
return nelems * size;
}
+int btf__resolve_type(const struct btf *btf, __u32 type_id)
+{
+ const struct btf_type *t;
+ int depth = 0;
+
+ t = btf__type_by_id(btf, type_id);
+ while (depth < MAX_RESOLVE_DEPTH &&
+ !btf_type_is_void_or_null(t) &&
+ IS_MODIFIER(BTF_INFO_KIND(t->info))) {
+ type_id = t->type;
+ t = btf__type_by_id(btf, type_id);
+ depth++;
+ }
+
+ if (depth == MAX_RESOLVE_DEPTH || btf_type_is_void_or_null(t))
+ return -EINVAL;
+
+ return type_id;
+}
+
__s32 btf__find_by_name(const struct btf *btf, const char *type_name)
{
__u32 i;
@@ -278,7 +303,7 @@ __s32 btf__find_by_name(const struct btf *btf, const char *type_name)
for (i = 1; i <= btf->nr_types; i++) {
const struct btf_type *t = btf->types[i];
- const char *name = btf_name_by_offset(btf, t->name_off);
+ const char *name = btf__name_by_offset(btf, t->name_off);
if (name && !strcmp(type_name, name))
return i;
@@ -368,3 +393,20 @@ int btf__fd(const struct btf *btf)
{
return btf->fd;
}
+
+const char *btf__name_by_offset(const struct btf *btf, __u32 offset)
+{
+ if (offset < btf->hdr->str_len)
+ return &btf->strings[offset];
+ else
+ return NULL;
+}
+
+const struct btf_type *btf__type_by_id(const struct btf *btf,
+ __u32 type_id)
+{
+ if (type_id > btf->nr_types)
+ return NULL;
+
+ return btf->types[type_id];
+}
diff --git a/tools/lib/bpf/btf.h b/tools/lib/bpf/btf.h
index e2a09a155f84..dd8a86eab8ca 100644
--- a/tools/lib/bpf/btf.h
+++ b/tools/lib/bpf/btf.h
@@ -19,6 +19,9 @@ struct btf *btf__new(__u8 *data, __u32 size, btf_print_fn_t err_log);
__s32 btf__find_by_name(const struct btf *btf, const char *type_name);
const struct btf_type *btf__type_by_id(const struct btf *btf, __u32 id);
__s64 btf__resolve_size(const struct btf *btf, __u32 type_id);
+int btf__resolve_type(const struct btf *btf, __u32 type_id);
int btf__fd(const struct btf *btf);
+const char *btf__name_by_offset(const struct btf *btf, __u32 offset);
+const struct btf_type *btf__type_by_id(const struct btf *btf, __u32 type_id);
#endif
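A rough caller sketch combining the new accessors (error handling trimmed; assumes a struct btf already loaded via btf__new()):

#include <stdio.h>
#include <linux/btf.h>
#include "btf.h"

/* Peel typedef/const/volatile/restrict modifiers off a type ID and
 * print the name of the underlying type. */
static void print_resolved_name(const struct btf *btf, __u32 type_id)
{
	const struct btf_type *t;
	const char *name;
	int resolved;

	resolved = btf__resolve_type(btf, type_id);
	if (resolved < 0)
		return;

	t = btf__type_by_id(btf, resolved);
	if (!t)
		return;

	name = btf__name_by_offset(btf, t->name_off);
	printf("%u -> %s\n", type_id, name ? name : "(anon)");
}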
diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
index 1aafdbe827fe..26e9527ee464 100644
--- a/tools/lib/bpf/libbpf.c
+++ b/tools/lib/bpf/libbpf.c
@@ -22,6 +22,7 @@
* License along with this program; if not, see <http://www.gnu.org/licenses>
*/
+#define _GNU_SOURCE
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
@@ -42,6 +43,7 @@
#include <sys/stat.h>
#include <sys/types.h>
#include <sys/vfs.h>
+#include <tools/libc_compat.h>
#include <libelf.h>
#include <gelf.h>
@@ -96,54 +98,6 @@ void libbpf_set_print(libbpf_print_fn_t warn,
#define STRERR_BUFSIZE 128
-#define ERRNO_OFFSET(e) ((e) - __LIBBPF_ERRNO__START)
-#define ERRCODE_OFFSET(c) ERRNO_OFFSET(LIBBPF_ERRNO__##c)
-#define NR_ERRNO (__LIBBPF_ERRNO__END - __LIBBPF_ERRNO__START)
-
-static const char *libbpf_strerror_table[NR_ERRNO] = {
- [ERRCODE_OFFSET(LIBELF)] = "Something wrong in libelf",
- [ERRCODE_OFFSET(FORMAT)] = "BPF object format invalid",
- [ERRCODE_OFFSET(KVERSION)] = "'version' section incorrect or lost",
- [ERRCODE_OFFSET(ENDIAN)] = "Endian mismatch",
- [ERRCODE_OFFSET(INTERNAL)] = "Internal error in libbpf",
- [ERRCODE_OFFSET(RELOC)] = "Relocation failed",
- [ERRCODE_OFFSET(VERIFY)] = "Kernel verifier blocks program loading",
- [ERRCODE_OFFSET(PROG2BIG)] = "Program too big",
- [ERRCODE_OFFSET(KVER)] = "Incorrect kernel version",
- [ERRCODE_OFFSET(PROGTYPE)] = "Kernel doesn't support this program type",
- [ERRCODE_OFFSET(WRNGPID)] = "Wrong pid in netlink message",
- [ERRCODE_OFFSET(INVSEQ)] = "Invalid netlink sequence",
-};
-
-int libbpf_strerror(int err, char *buf, size_t size)
-{
- if (!buf || !size)
- return -1;
-
- err = err > 0 ? err : -err;
-
- if (err < __LIBBPF_ERRNO__START) {
- int ret;
-
- ret = strerror_r(err, buf, size);
- buf[size - 1] = '\0';
- return ret;
- }
-
- if (err < __LIBBPF_ERRNO__END) {
- const char *msg;
-
- msg = libbpf_strerror_table[ERRNO_OFFSET(err)];
- snprintf(buf, size, "%s", msg);
- buf[size - 1] = '\0';
- return 0;
- }
-
- snprintf(buf, size, "Unknown libbpf error %d", err);
- buf[size - 1] = '\0';
- return -1;
-}
-
#define CHECK_ERR(action, err, out) do { \
err = action; \
if (err) \
@@ -235,6 +189,7 @@ struct bpf_object {
size_t nr_maps;
bool loaded;
+ bool has_pseudo_calls;
/*
* Information when doing elf related work. Only valid if fd
@@ -369,7 +324,7 @@ bpf_object__add_program(struct bpf_object *obj, void *data, size_t size,
progs = obj->programs;
nr_progs = obj->nr_programs;
- progs = realloc(progs, sizeof(progs[0]) * (nr_progs + 1));
+ progs = reallocarray(progs, nr_progs + 1, sizeof(progs[0]));
if (!progs) {
/*
* In this case the original obj->programs
@@ -401,10 +356,6 @@ bpf_object__init_prog_names(struct bpf_object *obj)
const char *name = NULL;
prog = &obj->programs[pi];
- if (prog->idx == obj->efile.text_shndx) {
- name = ".text";
- goto skip_search;
- }
for (si = 0; si < symbols->d_size / sizeof(GElf_Sym) && !name;
si++) {
@@ -427,12 +378,15 @@ bpf_object__init_prog_names(struct bpf_object *obj)
}
}
+ if (!name && prog->idx == obj->efile.text_shndx)
+ name = ".text";
+
if (!name) {
pr_warning("failed to find sym for prog %s\n",
prog->section_name);
return -EINVAL;
}
-skip_search:
+
prog->name = strdup(name);
if (!prog->name) {
pr_warning("failed to allocate memory for prog sym %s\n",
@@ -871,8 +825,8 @@ static int bpf_object__elf_collect(struct bpf_object *obj)
continue;
}
- reloc = realloc(reloc,
- sizeof(*obj->efile.reloc) * nr_reloc);
+ reloc = reallocarray(reloc, nr_reloc,
+ sizeof(*obj->efile.reloc));
if (!reloc) {
pr_warning("realloc failed\n");
err = -ENOMEM;
@@ -982,6 +936,7 @@ bpf_program__collect_reloc(struct bpf_program *prog, GElf_Shdr *shdr,
prog->reloc_desc[i].type = RELO_CALL;
prog->reloc_desc[i].insn_idx = insn_idx;
prog->reloc_desc[i].text_off = sym.st_value;
+ obj->has_pseudo_calls = true;
continue;
}
@@ -1085,6 +1040,53 @@ static int bpf_map_find_btf_info(struct bpf_map *map, const struct btf *btf)
return 0;
}
+int bpf_map__reuse_fd(struct bpf_map *map, int fd)
+{
+ struct bpf_map_info info = {};
+ __u32 len = sizeof(info);
+ int new_fd, err;
+ char *new_name;
+
+ err = bpf_obj_get_info_by_fd(fd, &info, &len);
+ if (err)
+ return err;
+
+ new_name = strdup(info.name);
+ if (!new_name)
+ return -errno;
+
+ new_fd = open("/", O_RDONLY | O_CLOEXEC);
+ if (new_fd < 0)
+ goto err_free_new_name;
+
+ new_fd = dup3(fd, new_fd, O_CLOEXEC);
+ if (new_fd < 0)
+ goto err_close_new_fd;
+
+ err = zclose(map->fd);
+ if (err)
+ goto err_close_new_fd;
+ free(map->name);
+
+ map->fd = new_fd;
+ map->name = new_name;
+ map->def.type = info.type;
+ map->def.key_size = info.key_size;
+ map->def.value_size = info.value_size;
+ map->def.max_entries = info.max_entries;
+ map->def.map_flags = info.map_flags;
+ map->btf_key_type_id = info.btf_key_type_id;
+ map->btf_value_type_id = info.btf_value_type_id;
+
+ return 0;
+
+err_close_new_fd:
+ close(new_fd);
+err_free_new_name:
+ free(new_name);
+ return -errno;
+}
+
static int
bpf_object__create_maps(struct bpf_object *obj)
{
@@ -1097,6 +1099,12 @@ bpf_object__create_maps(struct bpf_object *obj)
struct bpf_map_def *def = &map->def;
int *pfd = &map->fd;
+ if (map->fd >= 0) {
+ pr_debug("skip map create (preset) %s: fd=%d\n",
+ map->name, map->fd);
+ continue;
+ }
+
create_attr.name = map->name;
create_attr.map_ifindex = map->map_ifindex;
create_attr.map_type = def->type;
@@ -1167,7 +1175,7 @@ bpf_program__reloc_text(struct bpf_program *prog, struct bpf_object *obj,
return -LIBBPF_ERRNO__RELOC;
}
new_cnt = prog->insns_cnt + text->insns_cnt;
- new_insn = realloc(prog->insns, new_cnt * sizeof(*insn));
+ new_insn = reallocarray(prog->insns, new_cnt, sizeof(*insn));
if (!new_insn) {
pr_warning("oom in prog realloc\n");
return -ENOMEM;
@@ -1431,6 +1439,12 @@ out:
return err;
}
+static bool bpf_program__is_function_storage(struct bpf_program *prog,
+ struct bpf_object *obj)
+{
+ return prog->idx == obj->efile.text_shndx && obj->has_pseudo_calls;
+}
+
static int
bpf_object__load_progs(struct bpf_object *obj)
{
@@ -1438,7 +1452,7 @@ bpf_object__load_progs(struct bpf_object *obj)
int err;
for (i = 0; i < obj->nr_programs; i++) {
- if (obj->programs[i].idx == obj->efile.text_shndx)
+ if (bpf_program__is_function_storage(&obj->programs[i], obj))
continue;
err = bpf_program__load(&obj->programs[i],
obj->license,
@@ -1518,15 +1532,26 @@ out:
return ERR_PTR(err);
}
-struct bpf_object *bpf_object__open(const char *path)
+struct bpf_object *bpf_object__open_xattr(struct bpf_object_open_attr *attr)
{
/* param validation */
- if (!path)
+ if (!attr->file)
return NULL;
- pr_debug("loading %s\n", path);
+ pr_debug("loading %s\n", attr->file);
+
+ return __bpf_object__open(attr->file, NULL, 0,
+ bpf_prog_type__needs_kver(attr->prog_type));
+}
- return __bpf_object__open(path, NULL, 0, true);
+struct bpf_object *bpf_object__open(const char *path)
+{
+ struct bpf_object_open_attr attr = {
+ .file = path,
+ .prog_type = BPF_PROG_TYPE_UNSPEC,
+ };
+
+ return bpf_object__open_xattr(&attr);
}
struct bpf_object *bpf_object__open_buffer(void *obj_buf,
@@ -1863,8 +1888,8 @@ void *bpf_object__priv(struct bpf_object *obj)
return obj ? obj->priv : ERR_PTR(-EINVAL);
}
-struct bpf_program *
-bpf_program__next(struct bpf_program *prev, struct bpf_object *obj)
+static struct bpf_program *
+__bpf_program__next(struct bpf_program *prev, struct bpf_object *obj)
{
size_t idx;
@@ -1885,6 +1910,18 @@ bpf_program__next(struct bpf_program *prev, struct bpf_object *obj)
return &obj->programs[idx];
}
+struct bpf_program *
+bpf_program__next(struct bpf_program *prev, struct bpf_object *obj)
+{
+ struct bpf_program *prog = prev;
+
+ do {
+ prog = __bpf_program__next(prog, obj);
+ } while (prog && bpf_program__is_function_storage(prog, obj));
+
+ return prog;
+}
+
int bpf_program__set_priv(struct bpf_program *prog, void *priv,
bpf_program_clear_priv_t clear_priv)
{
@@ -1901,6 +1938,11 @@ void *bpf_program__priv(struct bpf_program *prog)
return prog ? prog->priv : ERR_PTR(-EINVAL);
}
+void bpf_program__set_ifindex(struct bpf_program *prog, __u32 ifindex)
+{
+ prog->prog_ifindex = ifindex;
+}
+
const char *bpf_program__title(struct bpf_program *prog, bool needs_copy)
{
const char *title;
@@ -2042,9 +2084,11 @@ static const struct {
BPF_PROG_SEC("lwt_in", BPF_PROG_TYPE_LWT_IN),
BPF_PROG_SEC("lwt_out", BPF_PROG_TYPE_LWT_OUT),
BPF_PROG_SEC("lwt_xmit", BPF_PROG_TYPE_LWT_XMIT),
+ BPF_PROG_SEC("lwt_seg6local", BPF_PROG_TYPE_LWT_SEG6LOCAL),
BPF_PROG_SEC("sockops", BPF_PROG_TYPE_SOCK_OPS),
BPF_PROG_SEC("sk_skb", BPF_PROG_TYPE_SK_SKB),
BPF_PROG_SEC("sk_msg", BPF_PROG_TYPE_SK_MSG),
+ BPF_PROG_SEC("lirc_mode2", BPF_PROG_TYPE_LIRC_MODE2),
BPF_SA_PROG_SEC("cgroup/bind4", BPF_CGROUP_INET4_BIND),
BPF_SA_PROG_SEC("cgroup/bind6", BPF_CGROUP_INET6_BIND),
BPF_SA_PROG_SEC("cgroup/connect4", BPF_CGROUP_INET4_CONNECT),
@@ -2060,23 +2104,31 @@ static const struct {
#undef BPF_S_PROG_SEC
#undef BPF_SA_PROG_SEC
-static int bpf_program__identify_section(struct bpf_program *prog)
+int libbpf_prog_type_by_name(const char *name, enum bpf_prog_type *prog_type,
+ enum bpf_attach_type *expected_attach_type)
{
int i;
- if (!prog->section_name)
- goto err;
-
- for (i = 0; i < ARRAY_SIZE(section_names); i++)
- if (strncmp(prog->section_name, section_names[i].sec,
- section_names[i].len) == 0)
- return i;
+ if (!name)
+ return -EINVAL;
-err:
- pr_warning("failed to guess program type based on section name %s\n",
- prog->section_name);
+ for (i = 0; i < ARRAY_SIZE(section_names); i++) {
+ if (strncmp(name, section_names[i].sec, section_names[i].len))
+ continue;
+ *prog_type = section_names[i].prog_type;
+ *expected_attach_type = section_names[i].expected_attach_type;
+ return 0;
+ }
+ return -EINVAL;
+}
- return -1;
+static int
+bpf_program__identify_section(struct bpf_program *prog,
+ enum bpf_prog_type *prog_type,
+ enum bpf_attach_type *expected_attach_type)
+{
+ return libbpf_prog_type_by_name(prog->section_name, prog_type,
+ expected_attach_type);
}
int bpf_map__fd(struct bpf_map *map)
@@ -2125,6 +2177,16 @@ void *bpf_map__priv(struct bpf_map *map)
return map ? map->priv : ERR_PTR(-EINVAL);
}
+bool bpf_map__is_offload_neutral(struct bpf_map *map)
+{
+ return map->def.type == BPF_MAP_TYPE_PERF_EVENT_ARRAY;
+}
+
+void bpf_map__set_ifindex(struct bpf_map *map, __u32 ifindex)
+{
+ map->map_ifindex = ifindex;
+}
+
struct bpf_map *
bpf_map__next(struct bpf_map *prev, struct bpf_object *obj)
{
@@ -2199,12 +2261,15 @@ int bpf_prog_load(const char *file, enum bpf_prog_type type,
int bpf_prog_load_xattr(const struct bpf_prog_load_attr *attr,
struct bpf_object **pobj, int *prog_fd)
{
+ struct bpf_object_open_attr open_attr = {
+ .file = attr->file,
+ .prog_type = attr->prog_type,
+ };
struct bpf_program *prog, *first_prog = NULL;
enum bpf_attach_type expected_attach_type;
enum bpf_prog_type prog_type;
struct bpf_object *obj;
struct bpf_map *map;
- int section_idx;
int err;
if (!attr)
@@ -2212,8 +2277,7 @@ int bpf_prog_load_xattr(const struct bpf_prog_load_attr *attr,
if (!attr->file)
return -EINVAL;
- obj = __bpf_object__open(attr->file, NULL, 0,
- bpf_prog_type__needs_kver(attr->prog_type));
+ obj = bpf_object__open_xattr(&open_attr);
if (IS_ERR_OR_NULL(obj))
return -ENOENT;
@@ -2226,26 +2290,27 @@ int bpf_prog_load_xattr(const struct bpf_prog_load_attr *attr,
prog->prog_ifindex = attr->ifindex;
expected_attach_type = attr->expected_attach_type;
if (prog_type == BPF_PROG_TYPE_UNSPEC) {
- section_idx = bpf_program__identify_section(prog);
- if (section_idx < 0) {
+ err = bpf_program__identify_section(prog, &prog_type,
+ &expected_attach_type);
+ if (err < 0) {
+ pr_warning("failed to guess program type based on section name %s\n",
+ prog->section_name);
bpf_object__close(obj);
return -EINVAL;
}
- prog_type = section_names[section_idx].prog_type;
- expected_attach_type =
- section_names[section_idx].expected_attach_type;
}
bpf_program__set_type(prog, prog_type);
bpf_program__set_expected_attach_type(prog,
expected_attach_type);
- if (prog->idx != obj->efile.text_shndx && !first_prog)
+ if (!bpf_program__is_function_storage(prog, obj) && !first_prog)
first_prog = prog;
}
bpf_map__for_each(map, obj) {
- map->map_ifindex = attr->ifindex;
+ if (!bpf_map__is_offload_neutral(map))
+ map->map_ifindex = attr->ifindex;
}
if (!first_prog) {
diff --git a/tools/lib/bpf/libbpf.h b/tools/lib/bpf/libbpf.h
index b33ae02f7d0e..413778a93499 100644
--- a/tools/lib/bpf/libbpf.h
+++ b/tools/lib/bpf/libbpf.h
@@ -66,7 +66,13 @@ void libbpf_set_print(libbpf_print_fn_t warn,
/* Hide internal to user */
struct bpf_object;
+struct bpf_object_open_attr {
+ const char *file;
+ enum bpf_prog_type prog_type;
+};
+
struct bpf_object *bpf_object__open(const char *path);
+struct bpf_object *bpf_object__open_xattr(struct bpf_object_open_attr *attr);
struct bpf_object *bpf_object__open_buffer(void *obj_buf,
size_t obj_buf_sz,
const char *name);
@@ -92,6 +98,9 @@ int bpf_object__set_priv(struct bpf_object *obj, void *priv,
bpf_object_clear_priv_t clear_priv);
void *bpf_object__priv(struct bpf_object *prog);
+int libbpf_prog_type_by_name(const char *name, enum bpf_prog_type *prog_type,
+ enum bpf_attach_type *expected_attach_type);
+
/* Accessors of bpf_program */
struct bpf_program;
struct bpf_program *bpf_program__next(struct bpf_program *prog,
@@ -109,6 +118,7 @@ int bpf_program__set_priv(struct bpf_program *prog, void *priv,
bpf_program_clear_priv_t clear_priv);
void *bpf_program__priv(struct bpf_program *prog);
+void bpf_program__set_ifindex(struct bpf_program *prog, __u32 ifindex);
const char *bpf_program__title(struct bpf_program *prog, bool needs_copy);
@@ -251,6 +261,9 @@ typedef void (*bpf_map_clear_priv_t)(struct bpf_map *, void *);
int bpf_map__set_priv(struct bpf_map *map, void *priv,
bpf_map_clear_priv_t clear_priv);
void *bpf_map__priv(struct bpf_map *map);
+int bpf_map__reuse_fd(struct bpf_map *map, int fd);
+bool bpf_map__is_offload_neutral(struct bpf_map *map);
+void bpf_map__set_ifindex(struct bpf_map *map, __u32 ifindex);
int bpf_map__pin(struct bpf_map *map, const char *path);
long libbpf_get_error(const void *ptr);
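For orientation, a hedged sketch of how the new entry points compose; the XDP program type, the error handling, and the map name "shared_map" are assumptions for illustration, not requirements of the API:

#include <string.h>
#include <linux/bpf.h>
#include <bpf/libbpf.h>

/* Open an object with an explicit program type, mark its programs and
 * maps for offload to ifindex, optionally reuse an existing map fd,
 * then load. */
static int load_offloaded(const char *path, __u32 ifindex, int shared_map_fd)
{
	struct bpf_object_open_attr attr = {
		.file      = path,
		.prog_type = BPF_PROG_TYPE_XDP,
	};
	struct bpf_program *prog;
	struct bpf_object *obj;
	struct bpf_map *map;

	obj = bpf_object__open_xattr(&attr);
	if (!obj || libbpf_get_error(obj))
		return -1;

	bpf_object__for_each_program(prog, obj)
		bpf_program__set_ifindex(prog, ifindex);

	bpf_map__for_each(map, obj) {
		if (bpf_map__is_offload_neutral(map))
			continue;	/* e.g. perf event arrays stay on the host */
		bpf_map__set_ifindex(map, ifindex);
		if (shared_map_fd >= 0 &&
		    !strcmp(bpf_map__name(map), "shared_map"))
			bpf_map__reuse_fd(map, shared_map_fd);
	}

	return bpf_object__load(obj);
}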
diff --git a/tools/lib/bpf/libbpf_errno.c b/tools/lib/bpf/libbpf_errno.c
new file mode 100644
index 000000000000..d9ba851bd7f9
--- /dev/null
+++ b/tools/lib/bpf/libbpf_errno.c
@@ -0,0 +1,74 @@
+// SPDX-License-Identifier: LGPL-2.1
+
+/*
+ * Copyright (C) 2013-2015 Alexei Starovoitov <ast@kernel.org>
+ * Copyright (C) 2015 Wang Nan <wangnan0@huawei.com>
+ * Copyright (C) 2015 Huawei Inc.
+ * Copyright (C) 2017 Nicira, Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation;
+ * version 2.1 of the License (not later!)
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this program; if not, see <http://www.gnu.org/licenses>
+ */
+
+#include <stdio.h>
+#include <string.h>
+
+#include "libbpf.h"
+
+#define ERRNO_OFFSET(e) ((e) - __LIBBPF_ERRNO__START)
+#define ERRCODE_OFFSET(c) ERRNO_OFFSET(LIBBPF_ERRNO__##c)
+#define NR_ERRNO (__LIBBPF_ERRNO__END - __LIBBPF_ERRNO__START)
+
+static const char *libbpf_strerror_table[NR_ERRNO] = {
+ [ERRCODE_OFFSET(LIBELF)] = "Something wrong in libelf",
+ [ERRCODE_OFFSET(FORMAT)] = "BPF object format invalid",
+ [ERRCODE_OFFSET(KVERSION)] = "'version' section incorrect or lost",
+ [ERRCODE_OFFSET(ENDIAN)] = "Endian mismatch",
+ [ERRCODE_OFFSET(INTERNAL)] = "Internal error in libbpf",
+ [ERRCODE_OFFSET(RELOC)] = "Relocation failed",
+ [ERRCODE_OFFSET(VERIFY)] = "Kernel verifier blocks program loading",
+ [ERRCODE_OFFSET(PROG2BIG)] = "Program too big",
+ [ERRCODE_OFFSET(KVER)] = "Incorrect kernel version",
+ [ERRCODE_OFFSET(PROGTYPE)] = "Kernel doesn't support this program type",
+ [ERRCODE_OFFSET(WRNGPID)] = "Wrong pid in netlink message",
+ [ERRCODE_OFFSET(INVSEQ)] = "Invalid netlink sequence",
+};
+
+int libbpf_strerror(int err, char *buf, size_t size)
+{
+ if (!buf || !size)
+ return -1;
+
+ err = err > 0 ? err : -err;
+
+ if (err < __LIBBPF_ERRNO__START) {
+ int ret;
+
+ ret = strerror_r(err, buf, size);
+ buf[size - 1] = '\0';
+ return ret;
+ }
+
+ if (err < __LIBBPF_ERRNO__END) {
+ const char *msg;
+
+ msg = libbpf_strerror_table[ERRNO_OFFSET(err)];
+ snprintf(buf, size, "%s", msg);
+ buf[size - 1] = '\0';
+ return 0;
+ }
+
+ snprintf(buf, size, "Unknown libbpf error %d", err);
+ buf[size - 1] = '\0';
+ return -1;
+}
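Callers are unchanged by the move; a minimal sketch (the 128-byte buffer is arbitrary):

#include <stdio.h>
#include <bpf/libbpf.h>

/* Translate a libbpf- or errno-style code (positive or negative) into
 * a human-readable message. */
static void pr_libbpf_err(const char *what, int err)
{
	char msg[128];

	libbpf_strerror(err, msg, sizeof(msg));
	fprintf(stderr, "%s: %s (%d)\n", what, msg, err);
}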
diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile
index a362e3d7abc6..5169a97eb68b 100644
--- a/tools/testing/selftests/bpf/Makefile
+++ b/tools/testing/selftests/bpf/Makefile
@@ -61,6 +61,7 @@ $(OUTPUT)/test_dev_cgroup: cgroup_helpers.c
$(OUTPUT)/test_sock: cgroup_helpers.c
$(OUTPUT)/test_sock_addr: cgroup_helpers.c
$(OUTPUT)/test_sockmap: cgroup_helpers.c
+$(OUTPUT)/test_tcpbpf_user: cgroup_helpers.c
$(OUTPUT)/test_progs: trace_helpers.c
$(OUTPUT)/get_cgroup_id_user: cgroup_helpers.c
diff --git a/tools/testing/selftests/bpf/cgroup_helpers.c b/tools/testing/selftests/bpf/cgroup_helpers.c
index c87b4e052ce9..cf16948aad4a 100644
--- a/tools/testing/selftests/bpf/cgroup_helpers.c
+++ b/tools/testing/selftests/bpf/cgroup_helpers.c
@@ -118,7 +118,7 @@ static int join_cgroup_from_top(char *cgroup_path)
*
* On success, it returns 0, otherwise on failure it returns 1.
*/
-int join_cgroup(char *path)
+int join_cgroup(const char *path)
{
char cgroup_path[PATH_MAX + 1];
@@ -158,7 +158,7 @@ void cleanup_cgroup_environment(void)
* On success, it returns the file descriptor. On failure it returns 0.
* If there is a failure, it prints the error to stderr.
*/
-int create_and_get_cgroup(char *path)
+int create_and_get_cgroup(const char *path)
{
char cgroup_path[PATH_MAX + 1];
int fd;
@@ -186,7 +186,7 @@ int create_and_get_cgroup(char *path)
* which is an invalid cgroup id.
* If there is a failure, it prints the error to stderr.
*/
-unsigned long long get_cgroup_id(char *path)
+unsigned long long get_cgroup_id(const char *path)
{
int dirfd, err, flags, mount_id, fhsize;
union {
diff --git a/tools/testing/selftests/bpf/cgroup_helpers.h b/tools/testing/selftests/bpf/cgroup_helpers.h
index 20a4a5dcd469..d64bb8957090 100644
--- a/tools/testing/selftests/bpf/cgroup_helpers.h
+++ b/tools/testing/selftests/bpf/cgroup_helpers.h
@@ -9,10 +9,10 @@
__FILE__, __LINE__, clean_errno(), ##__VA_ARGS__)
-int create_and_get_cgroup(char *path);
-int join_cgroup(char *path);
+int create_and_get_cgroup(const char *path);
+int join_cgroup(const char *path);
int setup_cgroup_environment(void);
void cleanup_cgroup_environment(void);
-unsigned long long get_cgroup_id(char *path);
+unsigned long long get_cgroup_id(const char *path);
#endif
diff --git a/tools/testing/selftests/bpf/test_offload.py b/tools/testing/selftests/bpf/test_offload.py
index be800d0e7a84..d59642e70f56 100755
--- a/tools/testing/selftests/bpf/test_offload.py
+++ b/tools/testing/selftests/bpf/test_offload.py
@@ -158,8 +158,9 @@ def tool(name, args, flags, JSON=True, ns="", fail=True, include_stderr=False):
else:
return ret, out
-def bpftool(args, JSON=True, ns="", fail=True):
- return tool("bpftool", args, {"json":"-p"}, JSON=JSON, ns=ns, fail=fail)
+def bpftool(args, JSON=True, ns="", fail=True, include_stderr=False):
+ return tool("bpftool", args, {"json":"-p"}, JSON=JSON, ns=ns,
+ fail=fail, include_stderr=include_stderr)
def bpftool_prog_list(expected=None, ns=""):
_, progs = bpftool("prog show", JSON=True, ns=ns, fail=True)
@@ -201,6 +202,21 @@ def bpftool_map_list_wait(expected=0, n_retry=20):
time.sleep(0.05)
raise Exception("Time out waiting for map counts to stabilize want %d, have %d" % (expected, nmaps))
+def bpftool_prog_load(sample, file_name, maps=[], prog_type="xdp", dev=None,
+ fail=True, include_stderr=False):
+ args = "prog load %s %s" % (os.path.join(bpf_test_dir, sample), file_name)
+ if prog_type is not None:
+ args += " type " + prog_type
+ if dev is not None:
+ args += " dev " + dev
+ if len(maps):
+ args += " map " + " map ".join(maps)
+
+ res = bpftool(args, fail=fail, include_stderr=include_stderr)
+ if res[0] == 0:
+ files.append(file_name)
+ return res
+
def ip(args, force=False, JSON=True, ns="", fail=True, include_stderr=False):
if force:
args = "-force " + args
@@ -307,21 +323,25 @@ class NetdevSim:
Class for netdevsim netdevice and its attributes.
"""
- def __init__(self):
+ def __init__(self, link=None):
+ self.link = link
+
self.dev = self._netdevsim_create()
devs.append(self)
self.ns = ""
self.dfs_dir = '/sys/kernel/debug/netdevsim/%s' % (self.dev['ifname'])
+ self.sdev_dir = self.dfs_dir + '/sdev/'
self.dfs_refresh()
def __getitem__(self, key):
return self.dev[key]
def _netdevsim_create(self):
+ link = "" if self.link is None else "link " + self.link.dev['ifname']
_, old = ip("link show")
- ip("link add sim%d type netdevsim")
+ ip("link add sim%d {link} type netdevsim".format(link=link))
_, new = ip("link show")
for dev in new:
@@ -339,13 +359,18 @@ class NetdevSim:
self.dfs = DebugfsDir(self.dfs_dir)
return self.dfs
+ def dfs_read(self, f):
+ path = os.path.join(self.dfs_dir, f)
+ _, data = cmd('cat %s' % (path))
+ return data.strip()
+
def dfs_num_bound_progs(self):
- path = os.path.join(self.dfs_dir, "bpf_bound_progs")
+ path = os.path.join(self.sdev_dir, "bpf_bound_progs")
_, progs = cmd('ls %s' % (path))
return len(progs.split())
def dfs_get_bound_progs(self, expected):
- progs = DebugfsDir(os.path.join(self.dfs_dir, "bpf_bound_progs"))
+ progs = DebugfsDir(os.path.join(self.sdev_dir, "bpf_bound_progs"))
if expected is not None:
if len(progs) != expected:
fail(True, "%d BPF programs bound, expected %d" %
@@ -547,11 +572,11 @@ def check_extack(output, reference, args):
if skip_extack:
return
lines = output.split("\n")
- comp = len(lines) >= 2 and lines[1] == reference
+ comp = len(lines) >= 2 and lines[1] == 'Error: ' + reference
fail(not comp, "Missing or incorrect netlink extack message")
def check_extack_nsim(output, reference, args):
- check_extack(output, "Error: netdevsim: " + reference, args)
+ check_extack(output, "netdevsim: " + reference, args)
def check_no_extack(res, needle):
fail((res[1] + res[2]).count(needle) or (res[1] + res[2]).count("Warning:"),
@@ -654,7 +679,7 @@ try:
ret, _, err = sim.cls_bpf_add_filter(obj, skip_sw=True,
fail=False, include_stderr=True)
fail(ret == 0, "TC filter loaded without enabling TC offloads")
- check_extack(err, "Error: TC offload is disabled on net device.", args)
+ check_extack(err, "TC offload is disabled on net device.", args)
sim.wait_for_flush()
sim.set_ethtool_tc_offloads(True)
@@ -694,7 +719,7 @@ try:
skip_sw=True,
fail=False, include_stderr=True)
fail(ret == 0, "Offloaded a filter to chain other than 0")
- check_extack(err, "Error: Driver supports only offload of chain 0.", args)
+ check_extack(err, "Driver supports only offload of chain 0.", args)
sim.tc_flush_filters()
start_test("Test TC replace...")
@@ -814,24 +839,20 @@ try:
"Device parameters reported for non-offloaded program")
start_test("Test XDP prog replace with bad flags...")
- ret, _, err = sim.set_xdp(obj, "offload", force=True,
+ ret, _, err = sim.set_xdp(obj, "generic", force=True,
fail=False, include_stderr=True)
fail(ret == 0, "Replaced XDP program with a program in different mode")
- check_extack_nsim(err, "program loaded with different flags.", args)
+ fail(err.count("File exists") != 1, "Replaced driver XDP with generic")
ret, _, err = sim.set_xdp(obj, "", force=True,
fail=False, include_stderr=True)
fail(ret == 0, "Replaced XDP program with a program in different mode")
- check_extack_nsim(err, "program loaded with different flags.", args)
+ check_extack(err, "program loaded with different flags.", args)
start_test("Test XDP prog remove with bad flags...")
- ret, _, err = sim.unset_xdp("offload", force=True,
- fail=False, include_stderr=True)
- fail(ret == 0, "Removed program with a bad mode mode")
- check_extack_nsim(err, "program loaded with different flags.", args)
ret, _, err = sim.unset_xdp("", force=True,
fail=False, include_stderr=True)
- fail(ret == 0, "Removed program with a bad mode mode")
- check_extack_nsim(err, "program loaded with different flags.", args)
+ fail(ret == 0, "Removed program with a bad mode")
+ check_extack(err, "program loaded with different flags.", args)
start_test("Test MTU restrictions...")
ret, _ = sim.set_mtu(9000, fail=False)
@@ -846,6 +867,25 @@ try:
sim.set_mtu(1500)
sim.wait_for_flush()
+ start_test("Test non-offload XDP attaching to HW...")
+ bpftool_prog_load("sample_ret0.o", "/sys/fs/bpf/nooffload")
+ nooffload = bpf_pinned("/sys/fs/bpf/nooffload")
+ ret, _, err = sim.set_xdp(nooffload, "offload",
+ fail=False, include_stderr=True)
+ fail(ret == 0, "attached non-offloaded XDP program to HW")
+ check_extack_nsim(err, "xdpoffload of non-bound program.", args)
+ rm("/sys/fs/bpf/nooffload")
+
+ start_test("Test offload XDP attaching to drv...")
+ bpftool_prog_load("sample_ret0.o", "/sys/fs/bpf/offload",
+ dev=sim['ifname'])
+ offload = bpf_pinned("/sys/fs/bpf/offload")
+ ret, _, err = sim.set_xdp(offload, "drv", fail=False, include_stderr=True)
+ fail(ret == 0, "attached offloaded XDP program to drv")
+ check_extack(err, "using device-bound program without HW_MODE flag is not supported.", args)
+ rm("/sys/fs/bpf/offload")
+ sim.wait_for_flush()
+
start_test("Test XDP offload...")
_, _, err = sim.set_xdp(obj, "offload", verbose=True, include_stderr=True)
ipl = sim.ip_link_show(xdp=True)
@@ -891,6 +931,60 @@ try:
rm(pin_file)
bpftool_prog_list_wait(expected=0)
+ start_test("Test multi-attachment XDP - attach...")
+ sim.set_xdp(obj, "offload")
+ xdp = sim.ip_link_show(xdp=True)["xdp"]
+ offloaded = sim.dfs_read("bpf_offloaded_id")
+ fail("prog" not in xdp, "Base program not reported in single program mode")
+ fail(len(ipl["xdp"]["attached"]) != 1,
+ "Wrong attached program count with one program")
+
+ sim.set_xdp(obj, "")
+ two_xdps = sim.ip_link_show(xdp=True)["xdp"]
+ offloaded2 = sim.dfs_read("bpf_offloaded_id")
+
+ fail(two_xdps["mode"] != 4, "Bad mode reported with multiple programs")
+ fail("prog" in two_xdps, "Base program reported in multi program mode")
+ fail(xdp["attached"][0] not in two_xdps["attached"],
+ "Offload program not reported after driver activated")
+ fail(len(two_xdps["attached"]) != 2,
+ "Wrong attached program count with two programs")
+ fail(two_xdps["attached"][0]["prog"]["id"] ==
+ two_xdps["attached"][1]["prog"]["id"],
+ "offloaded and drv programs have the same id")
+ fail(offloaded != offloaded2,
+ "offload ID changed after loading driver program")
+
+ start_test("Test multi-attachment XDP - replace...")
+ ret, _, err = sim.set_xdp(obj, "offload", fail=False, include_stderr=True)
+ fail(err.count("busy") != 1, "Replaced one of programs without -force")
+
+ start_test("Test multi-attachment XDP - detach...")
+ ret, _, err = sim.unset_xdp("drv", force=True,
+ fail=False, include_stderr=True)
+ fail(ret == 0, "Removed program with a bad mode")
+ check_extack(err, "program loaded with different flags.", args)
+
+ sim.unset_xdp("offload")
+ xdp = sim.ip_link_show(xdp=True)["xdp"]
+ offloaded = sim.dfs_read("bpf_offloaded_id")
+
+ fail(xdp["mode"] != 1, "Bad mode reported after multiple programs")
+ fail("prog" not in xdp,
+ "Base program not reported after multi program mode")
+ fail(xdp["attached"][0] not in two_xdps["attached"],
+ "Offload program not reported after driver activated")
+ fail(len(ipl["xdp"]["attached"]) != 1,
+ "Wrong attached program count with remaining programs")
+ fail(offloaded != "0", "offload ID reported with only driver program left")
+
+ start_test("Test multi-attachment XDP - device remove...")
+ sim.set_xdp(obj, "offload")
+ sim.remove()
+
+ sim = NetdevSim()
+ sim.set_ethtool_tc_offloads(True)
+
start_test("Test mixing of TC and XDP...")
sim.tc_add_ingress()
sim.set_xdp(obj, "offload")
@@ -1085,6 +1179,106 @@ try:
fail(ret == 0,
"netdevsim didn't refuse to create a map with offload disabled")
+ sim.remove()
+
+ start_test("Test multi-dev ASIC program reuse...")
+ simA = NetdevSim()
+ simB1 = NetdevSim()
+ simB2 = NetdevSim(link=simB1)
+ simB3 = NetdevSim(link=simB1)
+ sims = (simA, simB1, simB2, simB3)
+ simB = (simB1, simB2, simB3)
+
+ bpftool_prog_load("sample_map_ret0.o", "/sys/fs/bpf/nsimA",
+ dev=simA['ifname'])
+ progA = bpf_pinned("/sys/fs/bpf/nsimA")
+ bpftool_prog_load("sample_map_ret0.o", "/sys/fs/bpf/nsimB",
+ dev=simB1['ifname'])
+ progB = bpf_pinned("/sys/fs/bpf/nsimB")
+
+ simA.set_xdp(progA, "offload", JSON=False)
+ for d in simB:
+ d.set_xdp(progB, "offload", JSON=False)
+
+ start_test("Test multi-dev ASIC cross-dev replace...")
+ ret, _ = simA.set_xdp(progB, "offload", force=True, JSON=False, fail=False)
+ fail(ret == 0, "cross-ASIC program allowed")
+ for d in simB:
+ ret, _ = d.set_xdp(progA, "offload", force=True, JSON=False, fail=False)
+ fail(ret == 0, "cross-ASIC program allowed")
+
+ start_test("Test multi-dev ASIC cross-dev install...")
+ for d in sims:
+ d.unset_xdp("offload")
+
+ ret, _, err = simA.set_xdp(progB, "offload", force=True, JSON=False,
+ fail=False, include_stderr=True)
+ fail(ret == 0, "cross-ASIC program allowed")
+ check_extack_nsim(err, "program bound to different dev.", args)
+ for d in simB:
+ ret, _, err = d.set_xdp(progA, "offload", force=True, JSON=False,
+ fail=False, include_stderr=True)
+ fail(ret == 0, "cross-ASIC program allowed")
+ check_extack_nsim(err, "program bound to different dev.", args)
+
+ start_test("Test multi-dev ASIC cross-dev map reuse...")
+
+ mapA = bpftool("prog show %s" % (progA))[1]["map_ids"][0]
+ mapB = bpftool("prog show %s" % (progB))[1]["map_ids"][0]
+
+ ret, _ = bpftool_prog_load("sample_map_ret0.o", "/sys/fs/bpf/nsimB_",
+ dev=simB3['ifname'],
+ maps=["idx 0 id %d" % (mapB)],
+ fail=False)
+ fail(ret != 0, "couldn't reuse a map on the same ASIC")
+ rm("/sys/fs/bpf/nsimB_")
+
+ ret, _, err = bpftool_prog_load("sample_map_ret0.o", "/sys/fs/bpf/nsimA_",
+ dev=simA['ifname'],
+ maps=["idx 0 id %d" % (mapB)],
+ fail=False, include_stderr=True)
+ fail(ret == 0, "could reuse a map on a different ASIC")
+ fail(err.count("offload device mismatch between prog and map") == 0,
+ "error message missing for cross-ASIC map")
+
+ ret, _, err = bpftool_prog_load("sample_map_ret0.o", "/sys/fs/bpf/nsimB_",
+ dev=simB1['ifname'],
+ maps=["idx 0 id %d" % (mapA)],
+ fail=False, include_stderr=True)
+ fail(ret == 0, "could reuse a map on a different ASIC")
+ fail(err.count("offload device mismatch between prog and map") == 0,
+ "error message missing for cross-ASIC map")
+
+ start_test("Test multi-dev ASIC cross-dev destruction...")
+ bpftool_prog_list_wait(expected=2)
+
+ simA.remove()
+ bpftool_prog_list_wait(expected=1)
+
+ ifnameB = bpftool("prog show %s" % (progB))[1]["dev"]["ifname"]
+ fail(ifnameB != simB1['ifname'], "program not bound to original device")
+ simB1.remove()
+ bpftool_prog_list_wait(expected=1)
+
+ start_test("Test multi-dev ASIC cross-dev destruction - move...")
+ ifnameB = bpftool("prog show %s" % (progB))[1]["dev"]["ifname"]
+ fail(ifnameB not in (simB2['ifname'], simB3['ifname']),
+ "program not bound to remaining devices")
+
+ simB2.remove()
+ ifnameB = bpftool("prog show %s" % (progB))[1]["dev"]["ifname"]
+ fail(ifnameB != simB3['ifname'], "program not bound to remaining device")
+
+ simB3.remove()
+ bpftool_prog_list_wait(expected=0)
+
+ start_test("Test multi-dev ASIC cross-dev destruction - orphaned...")
+ ret, out = bpftool("prog show %s" % (progB), fail=False)
+ fail(ret == 0, "got information about orphaned program")
+ fail("error" not in out, "no error reported for get info on orphaned")
+ fail(out["error"] != "can't get prog info: No such device",
+ "wrong error for get info on orphaned")
+
print("%s: OK" % (os.path.basename(__file__)))
finally:
diff --git a/tools/testing/selftests/bpf/test_sock_addr.c b/tools/testing/selftests/bpf/test_sock_addr.c
index a5e76b9219b9..2e45c92d1111 100644
--- a/tools/testing/selftests/bpf/test_sock_addr.c
+++ b/tools/testing/selftests/bpf/test_sock_addr.c
@@ -998,8 +998,9 @@ int init_pktinfo(int domain, struct cmsghdr *cmsg)
return 0;
}
-static int sendmsg_to_server(const struct sockaddr_storage *addr,
- socklen_t addr_len, int set_cmsg, int *syscall_err)
+static int sendmsg_to_server(int type, const struct sockaddr_storage *addr,
+ socklen_t addr_len, int set_cmsg, int flags,
+ int *syscall_err)
{
union {
char buf[CMSG_SPACE(sizeof(struct in6_pktinfo))];
@@ -1022,7 +1023,7 @@ static int sendmsg_to_server(const struct sockaddr_storage *addr,
goto err;
}
- fd = socket(domain, SOCK_DGRAM, 0);
+ fd = socket(domain, type, 0);
if (fd == -1) {
log_err("Failed to create client socket");
goto err;
@@ -1052,7 +1053,7 @@ static int sendmsg_to_server(const struct sockaddr_storage *addr,
}
}
- if (sendmsg(fd, &hdr, 0) != sizeof(data)) {
+ if (sendmsg(fd, &hdr, flags) != sizeof(data)) {
log_err("Fail to send message to server");
*syscall_err = errno;
goto err;
@@ -1066,6 +1067,15 @@ out:
return fd;
}
+static int fastconnect_to_server(const struct sockaddr_storage *addr,
+ socklen_t addr_len)
+{
+ int sendmsg_err;
+
+ return sendmsg_to_server(SOCK_STREAM, addr, addr_len, /*set_cmsg*/0,
+ MSG_FASTOPEN, &sendmsg_err);
+}
+
static int recvmsg_from_client(int sockfd, struct sockaddr_storage *src_addr)
{
struct timeval tv;
@@ -1185,6 +1195,20 @@ static int run_connect_test_case(const struct sock_addr_test *test)
if (cmp_local_ip(clientfd, &expected_src_addr))
goto err;
+ if (test->type == SOCK_STREAM) {
+ /* Test TCP Fast Open scenario */
+ clientfd = fastconnect_to_server(&requested_addr, addr_len);
+ if (clientfd == -1)
+ goto err;
+
+ /* Make sure src and dst addrs were overridden properly */
+ if (cmp_peer_addr(clientfd, &expected_addr))
+ goto err;
+
+ if (cmp_local_ip(clientfd, &expected_src_addr))
+ goto err;
+ }
+
goto out;
err:
err = -1;
@@ -1222,8 +1246,9 @@ static int run_sendmsg_test_case(const struct sock_addr_test *test)
if (clientfd >= 0)
close(clientfd);
- clientfd = sendmsg_to_server(&requested_addr, addr_len,
- set_cmsg, &err);
+ clientfd = sendmsg_to_server(test->type, &requested_addr,
+ addr_len, set_cmsg, /*flags*/0,
+ &err);
if (err)
goto out;
else if (clientfd == -1)
diff --git a/tools/testing/selftests/bpf/test_tcpbpf.h b/tools/testing/selftests/bpf/test_tcpbpf.h
index 2fe43289943c..7bcfa6207005 100644
--- a/tools/testing/selftests/bpf/test_tcpbpf.h
+++ b/tools/testing/selftests/bpf/test_tcpbpf.h
@@ -12,5 +12,6 @@ struct tcpbpf_globals {
__u32 good_cb_test_rv;
__u64 bytes_received;
__u64 bytes_acked;
+ __u32 num_listen;
};
#endif
diff --git a/tools/testing/selftests/bpf/test_tcpbpf_kern.c b/tools/testing/selftests/bpf/test_tcpbpf_kern.c
index 3e645ee41ed5..4b7fd540cea9 100644
--- a/tools/testing/selftests/bpf/test_tcpbpf_kern.c
+++ b/tools/testing/selftests/bpf/test_tcpbpf_kern.c
@@ -96,15 +96,22 @@ int bpf_testcb(struct bpf_sock_ops *skops)
if (!gp)
break;
g = *gp;
- g.total_retrans = skops->total_retrans;
- g.data_segs_in = skops->data_segs_in;
- g.data_segs_out = skops->data_segs_out;
- g.bytes_received = skops->bytes_received;
- g.bytes_acked = skops->bytes_acked;
+ if (skops->args[0] == BPF_TCP_LISTEN) {
+ g.num_listen++;
+ } else {
+ g.total_retrans = skops->total_retrans;
+ g.data_segs_in = skops->data_segs_in;
+ g.data_segs_out = skops->data_segs_out;
+ g.bytes_received = skops->bytes_received;
+ g.bytes_acked = skops->bytes_acked;
+ }
bpf_map_update_elem(&global_map, &key, &g,
BPF_ANY);
}
break;
+ case BPF_SOCK_OPS_TCP_LISTEN_CB:
+ bpf_sock_ops_cb_flags_set(skops, BPF_SOCK_OPS_STATE_CB_FLAG);
+ break;
default:
rv = -1;
}
diff --git a/tools/testing/selftests/bpf/test_tcpbpf_user.c b/tools/testing/selftests/bpf/test_tcpbpf_user.c
index 84ab5163c828..a275c2971376 100644
--- a/tools/testing/selftests/bpf/test_tcpbpf_user.c
+++ b/tools/testing/selftests/bpf/test_tcpbpf_user.c
@@ -1,27 +1,59 @@
// SPDX-License-Identifier: GPL-2.0
+#include <inttypes.h>
#include <stdio.h>
#include <stdlib.h>
-#include <stdio.h>
#include <unistd.h>
#include <errno.h>
-#include <signal.h>
#include <string.h>
-#include <assert.h>
-#include <linux/perf_event.h>
-#include <linux/ptrace.h>
#include <linux/bpf.h>
-#include <sys/ioctl.h>
-#include <sys/time.h>
#include <sys/types.h>
-#include <sys/stat.h>
-#include <fcntl.h>
#include <bpf/bpf.h>
#include <bpf/libbpf.h>
-#include "bpf_util.h"
+
#include "bpf_rlimit.h"
-#include <linux/perf_event.h>
+#include "bpf_util.h"
+#include "cgroup_helpers.h"
+
#include "test_tcpbpf.h"
+#define EXPECT_EQ(expected, actual, fmt) \
+ do { \
+ if ((expected) != (actual)) { \
+ printf(" Value of: " #actual "\n" \
+ " Actual: %" fmt "\n" \
+ " Expected: %" fmt "\n", \
+ (actual), (expected)); \
+ goto err; \
+ } \
+ } while (0)
+
+int verify_result(const struct tcpbpf_globals *result)
+{
+ __u32 expected_events;
+
+ expected_events = ((1 << BPF_SOCK_OPS_TIMEOUT_INIT) |
+ (1 << BPF_SOCK_OPS_RWND_INIT) |
+ (1 << BPF_SOCK_OPS_TCP_CONNECT_CB) |
+ (1 << BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB) |
+ (1 << BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB) |
+ (1 << BPF_SOCK_OPS_NEEDS_ECN) |
+ (1 << BPF_SOCK_OPS_STATE_CB) |
+ (1 << BPF_SOCK_OPS_TCP_LISTEN_CB));
+
+ EXPECT_EQ(expected_events, result->event_map, "#" PRIx32);
+ EXPECT_EQ(501ULL, result->bytes_received, "llu");
+ EXPECT_EQ(1002ULL, result->bytes_acked, "llu");
+ EXPECT_EQ(1, result->data_segs_in, PRIu32);
+ EXPECT_EQ(1, result->data_segs_out, PRIu32);
+ EXPECT_EQ(0x80, result->bad_cb_test_rv, PRIu32);
+ EXPECT_EQ(0, result->good_cb_test_rv, PRIu32);
+ EXPECT_EQ(1, result->num_listen, PRIu32);
+
+ return 0;
+err:
+ return -1;
+}
+
static int bpf_find_map(const char *test, struct bpf_object *obj,
const char *name)
{
@@ -35,42 +67,28 @@ static int bpf_find_map(const char *test, struct bpf_object *obj,
return bpf_map__fd(map);
}
-#define SYSTEM(CMD) \
- do { \
- if (system(CMD)) { \
- printf("system(%s) FAILS!\n", CMD); \
- } \
- } while (0)
-
int main(int argc, char **argv)
{
const char *file = "test_tcpbpf_kern.o";
struct tcpbpf_globals g = {0};
- int cg_fd, prog_fd, map_fd;
- bool debug_flag = false;
+ const char *cg_path = "/foo";
int error = EXIT_FAILURE;
struct bpf_object *obj;
- char cmd[100], *dir;
- struct stat buffer;
+ int prog_fd, map_fd;
+ int cg_fd = -1;
__u32 key = 0;
- int pid;
int rv;
- if (argc > 1 && strcmp(argv[1], "-d") == 0)
- debug_flag = true;
+ if (setup_cgroup_environment())
+ goto err;
- dir = "/tmp/cgroupv2/foo";
+ cg_fd = create_and_get_cgroup(cg_path);
+ if (!cg_fd)
+ goto err;
- if (stat(dir, &buffer) != 0) {
- SYSTEM("mkdir -p /tmp/cgroupv2");
- SYSTEM("mount -t cgroup2 none /tmp/cgroupv2");
- SYSTEM("mkdir -p /tmp/cgroupv2/foo");
- }
- pid = (int) getpid();
- sprintf(cmd, "echo %d >> /tmp/cgroupv2/foo/cgroup.procs", pid);
- SYSTEM(cmd);
+ if (join_cgroup(cg_path))
+ goto err;
- cg_fd = open(dir, O_DIRECTORY, O_RDONLY);
if (bpf_prog_load(file, BPF_PROG_TYPE_SOCK_OPS, &obj, &prog_fd)) {
printf("FAILED: load_bpf_file failed for: %s\n", file);
goto err;
@@ -83,7 +101,10 @@ int main(int argc, char **argv)
goto err;
}
- SYSTEM("./tcp_server.py");
+ if (system("./tcp_server.py")) {
+ printf("FAILED: TCP server\n");
+ goto err;
+ }
map_fd = bpf_find_map(__func__, obj, "global_map");
if (map_fd < 0)
@@ -95,34 +116,16 @@ int main(int argc, char **argv)
goto err;
}
- if (g.bytes_received != 501 || g.bytes_acked != 1002 ||
- g.data_segs_in != 1 || g.data_segs_out != 1 ||
- (g.event_map ^ 0x47e) != 0 || g.bad_cb_test_rv != 0x80 ||
- g.good_cb_test_rv != 0) {
+ if (verify_result(&g)) {
printf("FAILED: Wrong stats\n");
- if (debug_flag) {
- printf("\n");
- printf("bytes_received: %d (expecting 501)\n",
- (int)g.bytes_received);
- printf("bytes_acked: %d (expecting 1002)\n",
- (int)g.bytes_acked);
- printf("data_segs_in: %d (expecting 1)\n",
- g.data_segs_in);
- printf("data_segs_out: %d (expecting 1)\n",
- g.data_segs_out);
- printf("event_map: 0x%x (at least 0x47e)\n",
- g.event_map);
- printf("bad_cb_test_rv: 0x%x (expecting 0x80)\n",
- g.bad_cb_test_rv);
- printf("good_cb_test_rv:0x%x (expecting 0)\n",
- g.good_cb_test_rv);
- }
goto err;
}
+
printf("PASSED!\n");
error = 0;
err:
bpf_prog_detach(cg_fd, BPF_CGROUP_SOCK_OPS);
+ close(cg_fd);
+ cleanup_cgroup_environment();
return error;
-
}
diff --git a/tools/testing/selftests/bpf/trace_helpers.c b/tools/testing/selftests/bpf/trace_helpers.c
index 3868dcb63420..cabe2a3a3b30 100644
--- a/tools/testing/selftests/bpf/trace_helpers.c
+++ b/tools/testing/selftests/bpf/trace_helpers.c
@@ -88,7 +88,7 @@ static int page_size;
static int page_cnt = 8;
static struct perf_event_mmap_page *header;
-int perf_event_mmap(int fd)
+int perf_event_mmap_header(int fd, struct perf_event_mmap_page **header)
{
void *base;
int mmap_size;
@@ -102,10 +102,15 @@ int perf_event_mmap(int fd)
return -1;
}
- header = base;
+ *header = base;
return 0;
}
+int perf_event_mmap(int fd)
+{
+ return perf_event_mmap_header(fd, &header);
+}
+
static int perf_event_poll(int fd)
{
struct pollfd pfd = { .fd = fd, .events = POLLIN };
@@ -163,3 +168,42 @@ int perf_event_poller(int fd, perf_event_print_fn output_fn)
return ret;
}
+
+int perf_event_poller_multi(int *fds, struct perf_event_mmap_page **headers,
+ int num_fds, perf_event_print_fn output_fn)
+{
+ enum bpf_perf_event_ret ret;
+ struct pollfd *pfds;
+ void *buf = NULL;
+ size_t len = 0;
+ int i;
+
+ pfds = calloc(num_fds, sizeof(*pfds));
+ if (!pfds)
+ return LIBBPF_PERF_EVENT_ERROR;
+
+ for (i = 0; i < num_fds; i++) {
+ pfds[i].fd = fds[i];
+ pfds[i].events = POLLIN;
+ }
+
+ for (;;) {
+ poll(pfds, num_fds, 1000);
+ for (i = 0; i < num_fds; i++) {
+ if (!pfds[i].revents)
+ continue;
+
+ ret = bpf_perf_event_read_simple(headers[i],
+ page_cnt * page_size,
+ page_size, &buf, &len,
+ bpf_perf_event_print,
+ output_fn);
+ if (ret != LIBBPF_PERF_EVENT_CONT)
+ break;
+ }
+ }
+ free(buf);
+ free(pfds);
+
+ return ret;
+}
diff --git a/tools/testing/selftests/bpf/trace_helpers.h b/tools/testing/selftests/bpf/trace_helpers.h
index 3b4bcf7f5084..18924f23db1b 100644
--- a/tools/testing/selftests/bpf/trace_helpers.h
+++ b/tools/testing/selftests/bpf/trace_helpers.h
@@ -3,6 +3,7 @@
#define __TRACE_HELPER_H
#include <libbpf.h>
+#include <linux/perf_event.h>
struct ksym {
long addr;
@@ -16,6 +17,9 @@ long ksym_get_addr(const char *name);
typedef enum bpf_perf_event_ret (*perf_event_print_fn)(void *data, int size);
int perf_event_mmap(int fd);
+int perf_event_mmap_header(int fd, struct perf_event_mmap_page **header);
/* return LIBBPF_PERF_EVENT_DONE or LIBBPF_PERF_EVENT_ERROR */
int perf_event_poller(int fd, perf_event_print_fn output_fn);
+int perf_event_poller_multi(int *fds, struct perf_event_mmap_page **headers,
+ int num_fds, perf_event_print_fn output_fn);
#endif
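A rough sketch of the intended multi-fd flow (no error reporting; print_bpf_output is a placeholder callback, not an existing helper):

#include <stdlib.h>
#include "trace_helpers.h"

static enum bpf_perf_event_ret print_bpf_output(void *data, int size)
{
	/* consume one sample here */
	return LIBBPF_PERF_EVENT_CONT;
}

/* mmap one ring buffer per perf event fd, then poll all of them with
 * the new multi-fd helper until the callback stops the loop. */
static void poll_all(int *fds, int num_fds)
{
	struct perf_event_mmap_page **headers;
	int i;

	headers = calloc(num_fds, sizeof(*headers));
	if (!headers)
		return;

	for (i = 0; i < num_fds; i++)
		if (perf_event_mmap_header(fds[i], &headers[i]))
			goto out;

	perf_event_poller_multi(fds, headers, num_fds, print_bpf_output);
out:
	free(headers);
}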
diff --git a/tools/testing/selftests/drivers/net/mlxsw/mirror_gre.sh b/tools/testing/selftests/drivers/net/mlxsw/mirror_gre.sh
new file mode 100755
index 000000000000..76f1ab4898d9
--- /dev/null
+++ b/tools/testing/selftests/drivers/net/mlxsw/mirror_gre.sh
@@ -0,0 +1,217 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+# This test uses standard topology for testing gretap. See
+# ../../../net/forwarding/mirror_gre_topo_lib.sh for more details.
+#
+# Test various aspects of offloading gretap mirrors that are specific to
+# mlxsw.
+
+lib_dir=$(dirname $0)/../../../net/forwarding
+
+NUM_NETIFS=6
+source $lib_dir/lib.sh
+source $lib_dir/mirror_lib.sh
+source $lib_dir/mirror_gre_lib.sh
+source $lib_dir/mirror_gre_topo_lib.sh
+
+setup_keyful()
+{
+ tunnel_create gt6-key ip6gretap 2001:db8:3::1 2001:db8:3::2 \
+ ttl 100 tos inherit allow-localremote \
+ key 1234
+
+ tunnel_create h3-gt6-key ip6gretap 2001:db8:3::2 2001:db8:3::1 \
+ key 1234
+ ip link set h3-gt6-key vrf v$h3
+ matchall_sink_create h3-gt6-key
+
+ ip address add dev $swp3 2001:db8:3::1/64
+ ip address add dev $h3 2001:db8:3::2/64
+}
+
+cleanup_keyful()
+{
+ ip address del dev $h3 2001:db8:3::2/64
+ ip address del dev $swp3 2001:db8:3::1/64
+
+ tunnel_destroy h3-gt6-key
+ tunnel_destroy gt6-key
+}
+
+setup_soft()
+{
+ # Set up a topology for testing underlay routes that point at an
+ # unsupported soft device.
+
+ tunnel_create gt6-soft ip6gretap 2001:db8:4::1 2001:db8:4::2 \
+ ttl 100 tos inherit allow-localremote
+
+ tunnel_create h3-gt6-soft ip6gretap 2001:db8:4::2 2001:db8:4::1
+ ip link set h3-gt6-soft vrf v$h3
+ matchall_sink_create h3-gt6-soft
+
+ ip link add name v1 type veth peer name v2
+ ip link set dev v1 up
+ ip address add dev v1 2001:db8:4::1/64
+
+ ip link set dev v2 vrf v$h3
+ ip link set dev v2 up
+ ip address add dev v2 2001:db8:4::2/64
+}
+
+cleanup_soft()
+{
+ ip link del dev v1
+
+ tunnel_destroy h3-gt6-soft
+ tunnel_destroy gt6-soft
+}
+
+setup_prepare()
+{
+ h1=${NETIFS[p1]}
+ swp1=${NETIFS[p2]}
+
+ swp2=${NETIFS[p3]}
+ h2=${NETIFS[p4]}
+
+ swp3=${NETIFS[p5]}
+ h3=${NETIFS[p6]}
+
+ vrf_prepare
+ mirror_gre_topo_create
+
+ ip address add dev $swp3 2001:db8:2::1/64
+ ip address add dev $h3 2001:db8:2::2/64
+
+ ip address add dev $swp3 192.0.2.129/28
+ ip address add dev $h3 192.0.2.130/28
+
+ setup_keyful
+ setup_soft
+}
+
+cleanup()
+{
+ pre_cleanup
+
+ cleanup_soft
+ cleanup_keyful
+
+ ip address del dev $h3 2001:db8:2::2/64
+ ip address del dev $swp3 2001:db8:2::1/64
+
+ ip address del dev $h3 192.0.2.130/28
+ ip address del dev $swp3 192.0.2.129/28
+
+ mirror_gre_topo_destroy
+ vrf_cleanup
+}
+
+test_span_gre_ttl_inherit()
+{
+ local tundev=$1; shift
+ local type=$1; shift
+ local what=$1; shift
+
+ RET=0
+
+ ip link set dev $tundev type $type ttl inherit
+ mirror_install $swp1 ingress $tundev "matchall $tcflags"
+ fail_test_span_gre_dir $tundev ingress
+
+ ip link set dev $tundev type $type ttl 100
+
+ quick_test_span_gre_dir $tundev ingress
+ mirror_uninstall $swp1 ingress
+
+ log_test "$what: no offload on TTL of inherit ($tcflags)"
+}
+
+test_span_gre_tos_fixed()
+{
+ local tundev=$1; shift
+ local type=$1; shift
+ local what=$1; shift
+
+ RET=0
+
+ ip link set dev $tundev type $type tos 0x10
+ mirror_install $swp1 ingress $tundev "matchall $tcflags"
+ fail_test_span_gre_dir $tundev ingress
+
+ ip link set dev $tundev type $type tos inherit
+ quick_test_span_gre_dir $tundev ingress
+ mirror_uninstall $swp1 ingress
+
+ log_test "$what: no offload on a fixed TOS ($tcflags)"
+}
+
+test_span_failable()
+{
+ local should_fail=$1; shift
+ local tundev=$1; shift
+ local what=$1; shift
+
+ RET=0
+
+ mirror_install $swp1 ingress $tundev "matchall $tcflags"
+ if ((should_fail)); then
+ fail_test_span_gre_dir $tundev ingress
+ else
+ quick_test_span_gre_dir $tundev ingress
+ fi
+ mirror_uninstall $swp1 ingress
+
+ log_test "$what: should_fail=$should_fail ($tcflags)"
+}
+
+test_failable()
+{
+ local should_fail=$1; shift
+
+ test_span_failable $should_fail gt6-key "mirror to keyful gretap"
+ test_span_failable $should_fail gt6-soft "mirror to gretap w/ soft underlay"
+}
+
+test_sw()
+{
+ slow_path_trap_install $swp1 ingress
+ slow_path_trap_install $swp1 egress
+
+ test_failable 0
+
+ slow_path_trap_uninstall $swp1 egress
+ slow_path_trap_uninstall $swp1 ingress
+}
+
+test_hw()
+{
+ test_failable 1
+
+ test_span_gre_tos_fixed gt4 gretap "mirror to gretap"
+ test_span_gre_tos_fixed gt6 ip6gretap "mirror to ip6gretap"
+
+ test_span_gre_ttl_inherit gt4 gretap "mirror to gretap"
+ test_span_gre_ttl_inherit gt6 ip6gretap "mirror to ip6gretap"
+}
+
+trap cleanup EXIT
+
+setup_prepare
+setup_wait
+
+if ! tc_offload_check; then
+ check_err 1 "Could not test offloaded functionality"
+ log_test "mlxsw-specific tests for mirror to gretap"
+ exit
+fi
+
+tcflags="skip_hw"
+test_sw
+
+tcflags="skip_sw"
+test_hw
+
+exit $EXIT_STATUS
diff --git a/tools/testing/selftests/drivers/net/mlxsw/mirror_gre_scale.sh b/tools/testing/selftests/drivers/net/mlxsw/mirror_gre_scale.sh
new file mode 100644
index 000000000000..6f3a70df63bc
--- /dev/null
+++ b/tools/testing/selftests/drivers/net/mlxsw/mirror_gre_scale.sh
@@ -0,0 +1,197 @@
+# SPDX-License-Identifier: GPL-2.0
+
+# Test offloading a number of mirrors-to-gretap. The test creates a number of
+# tunnels and adds one flower mirror rule for each of them, with each rule
+# matching a different host IP. It then generates traffic at each of the host
+# IPs and checks that the traffic has been mirrored to the corresponding tunnel.
+#
+# +--------------------------+ +--------------------------+
+# | H1 | | H2 |
+# | + $h1 | | $h2 + |
+# | | 2001:db8:1:X::1/64 | | 2001:db8:1:X::2/64 | |
+# +-----|--------------------+ +--------------------|-----+
+# | |
+# +-----|-------------------------------------------------------------|-----+
+# | SW o--> mirrors | |
+# | +---|-------------------------------------------------------------|---+ |
+# | | + $swp1 BR $swp2 + | |
+# | +---------------------------------------------------------------------+ |
+# | |
+# | + $swp3 + gt6-<X> (ip6gretap) |
+# | | 2001:db8:2:X::1/64 : loc=2001:db8:2:X::1 |
+# | | : rem=2001:db8:2:X::2 |
+# | | : ttl=100 |
+# | | : tos=inherit |
+# | | : |
+# +-----|--------------------------------:----------------------------------+
+# | :
+# +-----|--------------------------------:----------------------------------+
+# | H3 + $h3 + h3-gt6-<X> (ip6gretap) |
+# | 2001:db8:2:X::2/64 loc=2001:db8:2:X::2 |
+# | rem=2001:db8:2:X::1 |
+# | ttl=100 |
+# | tos=inherit |
+# | |
+# +-------------------------------------------------------------------------+
+
+source ../../../../net/forwarding/mirror_lib.sh
+
+MIRROR_NUM_NETIFS=6
+
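+# Print the 2001:db8:<net>:<num> prefix used by this test. Callers append
+# "::1"/"::2" and a prefix length, e.g. "$(mirror_gre_ipv6_addr 2 10)::1"
+# yields 2001:db8:2:a::1.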
+mirror_gre_ipv6_addr()
+{
+ local net=$1; shift
+ local num=$1; shift
+
+ printf "2001:db8:%x:%x" $net $num
+}
+
+mirror_gre_tunnels_create()
+{
+ local count=$1; shift
+ local should_fail=$1; shift
+
+ MIRROR_GRE_BATCH_FILE="$(mktemp)"
+ for ((i=0; i < count; ++i)); do
+ local match_dip=$(mirror_gre_ipv6_addr 1 $i)::2
+ local htun=h3-gt6-$i
+ local tun=gt6-$i
+
+ ((mirror_gre_tunnels++))
+
+ ip address add dev $h1 $(mirror_gre_ipv6_addr 1 $i)::1/64
+ ip address add dev $h2 $(mirror_gre_ipv6_addr 1 $i)::2/64
+
+ ip address add dev $swp3 $(mirror_gre_ipv6_addr 2 $i)::1/64
+ ip address add dev $h3 $(mirror_gre_ipv6_addr 2 $i)::2/64
+
+ tunnel_create $tun ip6gretap \
+ $(mirror_gre_ipv6_addr 2 $i)::1 \
+ $(mirror_gre_ipv6_addr 2 $i)::2 \
+ ttl 100 tos inherit allow-localremote
+
+ tunnel_create $htun ip6gretap \
+ $(mirror_gre_ipv6_addr 2 $i)::2 \
+ $(mirror_gre_ipv6_addr 2 $i)::1
+ ip link set $htun vrf v$h3
+ matchall_sink_create $htun
+
+ cat >> $MIRROR_GRE_BATCH_FILE <<-EOF
+ filter add dev $swp1 ingress pref 1000 \
+ protocol ipv6 \
+ flower $tcflags dst_ip $match_dip \
+ action mirred egress mirror dev $tun
+ EOF
+ done
+
+ tc -b $MIRROR_GRE_BATCH_FILE
+ check_err_fail $should_fail $? "Mirror rule insertion"
+}
+
+mirror_gre_tunnels_destroy()
+{
+ local count=$1; shift
+
+ for ((i=0; i < count; ++i)); do
+ local htun=h3-gt6-$i
+ local tun=gt6-$i
+
+ ip address del dev $h3 $(mirror_gre_ipv6_addr 2 $i)::2/64
+ ip address del dev $swp3 $(mirror_gre_ipv6_addr 2 $i)::1/64
+
+ ip address del dev $h2 $(mirror_gre_ipv6_addr 1 $i)::2/64
+ ip address del dev $h1 $(mirror_gre_ipv6_addr 1 $i)::1/64
+
+ tunnel_destroy $htun
+ tunnel_destroy $tun
+ done
+}
+
+__mirror_gre_test()
+{
+ local count=$1; shift
+ local should_fail=$1; shift
+
+ mirror_gre_tunnels_create $count $should_fail
+ if ((should_fail)); then
+ return
+ fi
+
+ sleep 5
+
+ for ((i = 0; i < count; ++i)); do
+ local dip=$(mirror_gre_ipv6_addr 1 $i)::2
+ local htun=h3-gt6-$i
+ local message
+
+ icmp6_capture_install $htun
+ mirror_test v$h1 "" $dip $htun 100 10
+ icmp6_capture_uninstall $htun
+ done
+}
+
+mirror_gre_test()
+{
+ local count=$1; shift
+ local should_fail=$1; shift
+
+ if ! tc_offload_check $TC_FLOWER_NUM_NETIFS; then
+ check_err 1 "Could not test offloaded functionality"
+ return
+ fi
+
+ tcflags="skip_sw"
+ __mirror_gre_test $count $should_fail
+}
+
+mirror_gre_setup_prepare()
+{
+ h1=${NETIFS[p1]}
+ swp1=${NETIFS[p2]}
+
+ swp2=${NETIFS[p3]}
+ h2=${NETIFS[p4]}
+
+ swp3=${NETIFS[p5]}
+ h3=${NETIFS[p6]}
+
+ mirror_gre_tunnels=0
+
+ vrf_prepare
+
+ simple_if_init $h1
+ simple_if_init $h2
+ simple_if_init $h3
+
+ ip link add name br1 type bridge vlan_filtering 1
+ ip link set dev br1 up
+
+ ip link set dev $swp1 master br1
+ ip link set dev $swp1 up
+ tc qdisc add dev $swp1 clsact
+
+ ip link set dev $swp2 master br1
+ ip link set dev $swp2 up
+
+ ip link set dev $swp3 up
+}
+
+mirror_gre_cleanup()
+{
+ mirror_gre_tunnels_destroy $mirror_gre_tunnels
+
+ ip link set dev $swp3 down
+
+ ip link set dev $swp2 down
+
+ tc qdisc del dev $swp1 clsact
+ ip link set dev $swp1 down
+
+ ip link del dev br1
+
+ simple_if_fini $h3
+ simple_if_fini $h2
+ simple_if_fini $h1
+
+ vrf_cleanup
+}
diff --git a/tools/testing/selftests/drivers/net/mlxsw/qos_dscp_bridge.sh b/tools/testing/selftests/drivers/net/mlxsw/qos_dscp_bridge.sh
new file mode 100755
index 000000000000..1ca631d5aaba
--- /dev/null
+++ b/tools/testing/selftests/drivers/net/mlxsw/qos_dscp_bridge.sh
@@ -0,0 +1,189 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+# Test for DSCP prioritization and rewrite. Packets ingress $swp1 with a DSCP
+# tag and are prioritized according to the map at $swp1. They egress $swp2 and
+# the DSCP value is updated to match the map at that interface. The updated DSCP
+# tag is verified at $h2.
+#
+# ICMP responses are produced with the same DSCP tag that arrived at $h2. They
+# go through prioritization at $swp2 and DSCP retagging at $swp1. The tag is
+# verified at $h1; it should match the original tag.
+#
+# +----------------------+ +----------------------+
+# | H1 | | H2 |
+# | + $h1 | | $h2 + |
+# | | 192.0.2.1/28 | | 192.0.2.2/28 | |
+# +----|-----------------+ +----------------|-----+
+# | |
+# +----|----------------------------------------------------------------|-----+
+# | SW | | |
+# | +-|----------------------------------------------------------------|-+ |
+# | | + $swp1 BR $swp2 + | |
+# | | APP=0,5,10 .. 7,5,17 APP=0,5,20 .. 7,5,27 | |
+# | +--------------------------------------------------------------------+ |
+# +---------------------------------------------------------------------------+
+
+ALL_TESTS="
+ ping_ipv4
+ test_dscp
+"
+
+lib_dir=$(dirname $0)/../../../net/forwarding
+
+NUM_NETIFS=4
+source $lib_dir/lib.sh
+
+h1_create()
+{
+ local dscp;
+
+ simple_if_init $h1 192.0.2.1/28
+ tc qdisc add dev $h1 clsact
+ dscp_capture_install $h1 10
+}
+
+h1_destroy()
+{
+ dscp_capture_uninstall $h1 10
+ tc qdisc del dev $h1 clsact
+ simple_if_fini $h1 192.0.2.1/28
+}
+
+h2_create()
+{
+ simple_if_init $h2 192.0.2.2/28
+ tc qdisc add dev $h2 clsact
+ dscp_capture_install $h2 20
+}
+
+h2_destroy()
+{
+ dscp_capture_uninstall $h2 20
+ tc qdisc del dev $h2 clsact
+ simple_if_fini $h2 192.0.2.2/28
+}
+
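+# Generate the lldptool APP arguments mapping priorities 0..7 to DSCP values
+# base..base+7. Selector 5 is the DSCP selector (IEEE_8021QAZ_APP_SEL_DSCP),
+# so e.g. "dscp_map 10" expands to "app=0,5,10 ... app=7,5,17".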
+dscp_map()
+{
+ local base=$1; shift
+
+ for prio in {0..7}; do
+ echo app=$prio,5,$((base + prio))
+ done
+}
+
+switch_create()
+{
+ ip link add name br1 type bridge vlan_filtering 1
+ ip link set dev br1 up
+ ip link set dev $swp1 master br1
+ ip link set dev $swp1 up
+ ip link set dev $swp2 master br1
+ ip link set dev $swp2 up
+
+ lldptool -T -i $swp1 -V APP $(dscp_map 10) >/dev/null
+ lldptool -T -i $swp2 -V APP $(dscp_map 20) >/dev/null
+ lldpad_app_wait_set $swp1
+ lldpad_app_wait_set $swp2
+}
+
+switch_destroy()
+{
+ lldptool -T -i $swp2 -V APP -d $(dscp_map 20) >/dev/null
+ lldptool -T -i $swp1 -V APP -d $(dscp_map 10) >/dev/null
+ lldpad_app_wait_del
+
+ ip link set dev $swp2 nomaster
+ ip link set dev $swp1 nomaster
+ ip link del dev br1
+}
+
+setup_prepare()
+{
+ h1=${NETIFS[p1]}
+ swp1=${NETIFS[p2]}
+
+ swp2=${NETIFS[p3]}
+ h2=${NETIFS[p4]}
+
+ vrf_prepare
+
+ h1_create
+ h2_create
+ switch_create
+}
+
+cleanup()
+{
+ pre_cleanup
+
+ switch_destroy
+ h2_destroy
+ h1_destroy
+
+ vrf_cleanup
+}
+
+ping_ipv4()
+{
+ ping_test $h1 192.0.2.2
+}
+
+dscp_ping_test()
+{
+ local vrf_name=$1; shift
+ local sip=$1; shift
+ local dip=$1; shift
+ local prio=$1; shift
+ local dev_10=$1; shift
+ local dev_20=$1; shift
+
+ local dscp_10=$(((prio + 10) << 2))
+ local dscp_20=$(((prio + 20) << 2))
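+	# ping's -Q argument is the whole TOS/TCLASS byte and DSCP occupies its
+	# upper six bits, hence the shift by two. E.g. prio 3 gives DSCP 13 on
+	# ingress (-Q 52) and DSCP 23 on egress (-Q 92).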
+
+ RET=0
+
+ local -A t0s
+ eval "t0s=($(dscp_fetch_stats $dev_10 10)
+ $(dscp_fetch_stats $dev_20 20))"
+
+ ip vrf exec $vrf_name \
+ ${PING} -Q $dscp_10 ${sip:+-I $sip} $dip \
+ -c 10 -i 0.1 -w 2 &> /dev/null
+
+ local -A t1s
+ eval "t1s=($(dscp_fetch_stats $dev_10 10)
+ $(dscp_fetch_stats $dev_20 20))"
+
+ for key in ${!t0s[@]}; do
+ local expect
+ if ((key == prio+10 || key == prio+20)); then
+ expect=10
+ else
+ expect=0
+ fi
+
+ local delta=$((t1s[$key] - t0s[$key]))
+ ((expect == delta))
+ check_err $? "DSCP $key: Expected to capture $expect packets, got $delta."
+ done
+
+ log_test "DSCP rewrite: $dscp_10-(prio $prio)-$dscp_20"
+}
+
+test_dscp()
+{
+ for prio in {0..7}; do
+ dscp_ping_test v$h1 192.0.2.1 192.0.2.2 $prio $h1 $h2
+ done
+}
+
+trap cleanup EXIT
+
+setup_prepare
+setup_wait
+
+tests_run
+
+exit $EXIT_STATUS
diff --git a/tools/testing/selftests/drivers/net/mlxsw/qos_dscp_router.sh b/tools/testing/selftests/drivers/net/mlxsw/qos_dscp_router.sh
new file mode 100755
index 000000000000..281d90766e12
--- /dev/null
+++ b/tools/testing/selftests/drivers/net/mlxsw/qos_dscp_router.sh
@@ -0,0 +1,233 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+# Test for DSCP prioritization in the router.
+#
+# With ip_forward_update_priority disabled, the packets are expected to keep
+# their DSCP (which in this test uses only values 0..7) intact as they are
+# forwarded by the switch. That is verified at $h2. ICMP responses are formed
+# with the same DSCP as the requests, and likewise pass through the switch
+# intact, which is verified at $h1.
+#
+# With ip_forward_update_priority enabled, the router reprioritizes the packets
+# according to the table in reprioritize(). Thus, say, DSCP 7 maps to priority
+# 4, which on egress maps back to DSCP 4. The response packet then gets
+# reprioritized to 6, getting DSCP 6 on egress.
+#
+# +----------------------+ +----------------------+
+# | H1 | | H2 |
+# | + $h1 | | $h2 + |
+# | | 192.0.2.1/28 | | 192.0.2.18/28 | |
+# +----|-----------------+ +----------------|-----+
+# | |
+# +----|----------------------------------------------------------------|-----+
+# | SW | | |
+# | + $swp1 $swp2 + |
+# | 192.0.2.2/28 192.0.2.17/28 |
+# | APP=0,5,0 .. 7,5,7 APP=0,5,0 .. 7,5,7 |
+# +---------------------------------------------------------------------------+
+
+ALL_TESTS="
+ ping_ipv4
+ test_update
+ test_no_update
+"
+
+lib_dir=$(dirname $0)/../../../net/forwarding
+
+NUM_NETIFS=4
+source $lib_dir/lib.sh
+
+reprioritize()
+{
+ local in=$1; shift
+
+ # This is based on rt_tos2priority in include/net/route.h. Assuming 1:1
+ # mapping between priorities and TOS, it yields a new priority for a
+ # packet with ingress priority of $in.
+ local -a reprio=(0 0 2 2 6 6 4 4)
+
+ echo ${reprio[$in]}
+}
+
+h1_create()
+{
+ local dscp;
+
+ simple_if_init $h1 192.0.2.1/28
+ tc qdisc add dev $h1 clsact
+ dscp_capture_install $h1 0
+ ip route add vrf v$h1 192.0.2.16/28 via 192.0.2.2
+}
+
+h1_destroy()
+{
+ ip route del vrf v$h1 192.0.2.16/28 via 192.0.2.2
+ dscp_capture_uninstall $h1 0
+ tc qdisc del dev $h1 clsact
+ simple_if_fini $h1 192.0.2.1/28
+}
+
+h2_create()
+{
+ simple_if_init $h2 192.0.2.18/28
+ tc qdisc add dev $h2 clsact
+ dscp_capture_install $h2 0
+ ip route add vrf v$h2 192.0.2.0/28 via 192.0.2.17
+}
+
+h2_destroy()
+{
+ ip route del vrf v$h2 192.0.2.0/28 via 192.0.2.17
+ dscp_capture_uninstall $h2 0
+ tc qdisc del dev $h2 clsact
+ simple_if_fini $h2 192.0.2.18/28
+}
+
+dscp_map()
+{
+ local base=$1; shift
+
+ for prio in {0..7}; do
+ echo app=$prio,5,$((base + prio))
+ done
+}
+
+switch_create()
+{
+ simple_if_init $swp1 192.0.2.2/28
+ __simple_if_init $swp2 v$swp1 192.0.2.17/28
+
+ lldptool -T -i $swp1 -V APP $(dscp_map 0) >/dev/null
+ lldptool -T -i $swp2 -V APP $(dscp_map 0) >/dev/null
+ lldpad_app_wait_set $swp1
+ lldpad_app_wait_set $swp2
+}
+
+switch_destroy()
+{
+ lldptool -T -i $swp2 -V APP -d $(dscp_map 0) >/dev/null
+ lldptool -T -i $swp1 -V APP -d $(dscp_map 0) >/dev/null
+ lldpad_app_wait_del
+
+ __simple_if_fini $swp2 192.0.2.17/28
+ simple_if_fini $swp1 192.0.2.2/28
+}
+
+setup_prepare()
+{
+ h1=${NETIFS[p1]}
+ swp1=${NETIFS[p2]}
+
+ swp2=${NETIFS[p3]}
+ h2=${NETIFS[p4]}
+
+ vrf_prepare
+
+ sysctl_set net.ipv4.ip_forward_update_priority 1
+ h1_create
+ h2_create
+ switch_create
+}
+
+cleanup()
+{
+ pre_cleanup
+
+ switch_destroy
+ h2_destroy
+ h1_destroy
+ sysctl_restore net.ipv4.ip_forward_update_priority
+
+ vrf_cleanup
+}
+
+ping_ipv4()
+{
+ ping_test $h1 192.0.2.18
+}
+
+dscp_ping_test()
+{
+ local vrf_name=$1; shift
+ local sip=$1; shift
+ local dip=$1; shift
+ local prio=$1; shift
+ local reprio=$1; shift
+ local dev1=$1; shift
+ local dev2=$1; shift
+
+ local prio2=$($reprio $prio) # ICMP Request egress prio
+ local prio3=$($reprio $prio2) # ICMP Response egress prio
+
+ local dscp=$((prio << 2)) # ICMP Request ingress DSCP
+ local dscp2=$((prio2 << 2)) # ICMP Request egress DSCP
+ local dscp3=$((prio3 << 2)) # ICMP Response egress DSCP
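+	# For example, with reprioritization enabled and prio=7: prio2=4 and
+	# prio3=6, so the request is sent with -Q 28 (DSCP 7), egresses towards
+	# H2 with DSCP 4, and the ICMP response egresses towards H1 with DSCP 6.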
+
+ RET=0
+
+ eval "local -A dev1_t0s=($(dscp_fetch_stats $dev1 0))"
+ eval "local -A dev2_t0s=($(dscp_fetch_stats $dev2 0))"
+
+ ip vrf exec $vrf_name \
+ ${PING} -Q $dscp ${sip:+-I $sip} $dip \
+ -c 10 -i 0.1 -w 2 &> /dev/null
+
+ eval "local -A dev1_t1s=($(dscp_fetch_stats $dev1 0))"
+ eval "local -A dev2_t1s=($(dscp_fetch_stats $dev2 0))"
+
+ for i in {0..7}; do
+ local dscpi=$((i << 2))
+ local expect2=0
+ local expect3=0
+
+ if ((i == prio2)); then
+ expect2=10
+ fi
+ if ((i == prio3)); then
+ expect3=10
+ fi
+
+ local delta=$((dev2_t1s[$i] - dev2_t0s[$i]))
+ ((expect2 == delta))
+ check_err $? "DSCP $dscpi@$dev2: Expected to capture $expect2 packets, got $delta."
+
+ delta=$((dev1_t1s[$i] - dev1_t0s[$i]))
+ ((expect3 == delta))
+ check_err $? "DSCP $dscpi@$dev1: Expected to capture $expect3 packets, got $delta."
+ done
+
+ log_test "DSCP rewrite: $dscp-(prio $prio2)-$dscp2-(prio $prio3)-$dscp3"
+}
+
+__test_update()
+{
+ local update=$1; shift
+ local reprio=$1; shift
+
+ sysctl_restore net.ipv4.ip_forward_update_priority
+ sysctl_set net.ipv4.ip_forward_update_priority $update
+
+ for prio in {0..7}; do
+ dscp_ping_test v$h1 192.0.2.1 192.0.2.18 $prio $reprio $h1 $h2
+ done
+}
+
+test_update()
+{
+ __test_update 1 reprioritize
+}
+
+test_no_update()
+{
+ __test_update 0 echo
+}
+
+trap cleanup EXIT
+
+setup_prepare
+setup_wait
+
+tests_run
+
+exit $EXIT_STATUS
diff --git a/tools/testing/selftests/drivers/net/mlxsw/router_scale.sh b/tools/testing/selftests/drivers/net/mlxsw/router_scale.sh
new file mode 100644
index 000000000000..d231649b4f01
--- /dev/null
+++ b/tools/testing/selftests/drivers/net/mlxsw/router_scale.sh
@@ -0,0 +1,167 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+ROUTER_NUM_NETIFS=4
+
+router_h1_create()
+{
+ simple_if_init $h1 192.0.1.1/24
+ ip route add 193.0.0.0/8 via 192.0.1.2 dev $h1
+}
+
+router_h1_destroy()
+{
+ ip route del 193.0.0.0/8 via 192.0.1.2 dev $h1
+ simple_if_fini $h1 192.0.1.1/24
+}
+
+router_h2_create()
+{
+ simple_if_init $h2 192.0.2.1/24
+ tc qdisc add dev $h2 handle ffff: ingress
+}
+
+router_h2_destroy()
+{
+ tc qdisc del dev $h2 handle ffff: ingress
+ simple_if_fini $h2 192.0.2.1/24
+}
+
+router_create()
+{
+ ip link set dev $rp1 up
+ ip link set dev $rp2 up
+
+ ip address add 192.0.1.2/24 dev $rp1
+ ip address add 192.0.2.2/24 dev $rp2
+}
+
+router_destroy()
+{
+ ip address del 192.0.2.2/24 dev $rp2
+ ip address del 192.0.1.2/24 dev $rp1
+
+ ip link set dev $rp2 down
+ ip link set dev $rp1 down
+}
+
+router_setup_prepare()
+{
+ h1=${NETIFS[p1]}
+ rp1=${NETIFS[p2]}
+
+ rp2=${NETIFS[p3]}
+ h2=${NETIFS[p4]}
+
+ h1mac=$(mac_get $h1)
+ rp1mac=$(mac_get $rp1)
+
+ vrf_prepare
+
+ router_h1_create
+ router_h2_create
+
+ router_create
+}
+
+router_offload_validate()
+{
+ local route_count=$1
+ local offloaded_count
+
+ offloaded_count=$(ip route | grep -o 'offload' | wc -l)
+ [[ $offloaded_count -ge $route_count ]]
+}
+
+router_routes_create()
+{
+ local route_count=$1
+ local count=0
+
+ ROUTE_FILE="$(mktemp)"
+
+ for i in {0..255}
+ do
+ for j in {0..255}
+ do
+ for k in {0..255}
+ do
+ if [[ $count -eq $route_count ]]; then
+ break 3
+ fi
+
+ echo route add 193.${i}.${j}.${k}/32 via \
+ 192.0.2.1 dev $rp2 >> $ROUTE_FILE
+ ((count++))
+ done
+ done
+ done
+
+ ip -b $ROUTE_FILE &> /dev/null
+}
+
+router_routes_destroy()
+{
+ if [[ -v ROUTE_FILE ]]; then
+ rm -f $ROUTE_FILE
+ fi
+}
+
+router_test()
+{
+ local route_count=$1
+ local should_fail=$2
+ local count=0
+
+ RET=0
+
+ router_routes_create $route_count
+
+ router_offload_validate $route_count
+ check_err_fail $should_fail $? "Offload of $route_count routes"
+ if [[ $RET -ne 0 ]] || [[ $should_fail -eq 1 ]]; then
+ return
+ fi
+
+ tc filter add dev $h2 ingress protocol ip pref 1 flower \
+ skip_sw dst_ip 193.0.0.0/8 action drop
+
+ for i in {0..255}
+ do
+ for j in {0..255}
+ do
+ for k in {0..255}
+ do
+ if [[ $count -eq $route_count ]]; then
+ break 3
+ fi
+
+ $MZ $h1 -c 1 -p 64 -a $h1mac -b $rp1mac \
+ -A 192.0.1.1 -B 193.${i}.${j}.${k} \
+ -t ip -q
+ ((count++))
+ done
+ done
+ done
+
+ tc_check_packets "dev $h2 ingress" 1 $route_count
+ check_err $? "Offload mismatch"
+
+ tc filter del dev $h2 ingress protocol ip pref 1 flower \
+ skip_sw dst_ip 193.0.0.0/8 action drop
+
+ router_routes_destroy
+}
+
+router_cleanup()
+{
+ pre_cleanup
+
+ router_routes_destroy
+ router_destroy
+
+ router_h2_destroy
+ router_h1_destroy
+
+ vrf_cleanup
+}
diff --git a/tools/testing/selftests/drivers/net/mlxsw/spectrum/devlink_lib_spectrum.sh b/tools/testing/selftests/drivers/net/mlxsw/spectrum/devlink_lib_spectrum.sh
new file mode 100644
index 000000000000..73035e25085d
--- /dev/null
+++ b/tools/testing/selftests/drivers/net/mlxsw/spectrum/devlink_lib_spectrum.sh
@@ -0,0 +1,119 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+source "../../../../net/forwarding/devlink_lib.sh"
+
+if [ "$DEVLINK_VIDDID" != "15b3:cb84" ]; then
+ echo "SKIP: test is tailored for Mellanox Spectrum"
+ exit 1
+fi
+
+# Needed for returning to default
+declare -A KVD_DEFAULTS
+
+KVD_CHILDREN="linear hash_single hash_double"
+KVDL_CHILDREN="singles chunks large_chunks"
+
+devlink_sp_resource_minimize()
+{
+ local size
+ local i
+
+ for i in $KVD_CHILDREN; do
+ size=$(devlink_resource_get kvd "$i" | jq '.["size_min"]')
+ devlink_resource_size_set "$size" kvd "$i"
+ done
+
+ for i in $KVDL_CHILDREN; do
+ size=$(devlink_resource_get kvd linear "$i" | \
+ jq '.["size_min"]')
+ devlink_resource_size_set "$size" kvd linear "$i"
+ done
+}
+
+devlink_sp_size_kvd_to_default()
+{
+ local need_reload=0
+ local i
+
+ for i in $KVD_CHILDREN; do
+ local size=$(echo "${KVD_DEFAULTS[kvd_$i]}" | jq '.["size"]')
+ current_size=$(devlink_resource_size_get kvd "$i")
+
+ if [ "$size" -ne "$current_size" ]; then
+ devlink_resource_size_set "$size" kvd "$i"
+ need_reload=1
+ fi
+ done
+
+ for i in $KVDL_CHILDREN; do
+ local size=$(echo "${KVD_DEFAULTS[kvd_linear_$i]}" | \
+ jq '.["size"]')
+ current_size=$(devlink_resource_size_get kvd linear "$i")
+
+ if [ "$size" -ne "$current_size" ]; then
+ devlink_resource_size_set "$size" kvd linear "$i"
+ need_reload=1
+ fi
+ done
+
+ if [ "$need_reload" -ne "0" ]; then
+ devlink_reload
+ fi
+}
+
+devlink_sp_read_kvd_defaults()
+{
+ local key
+ local i
+
+ KVD_DEFAULTS[kvd]=$(devlink_resource_get "kvd")
+ for i in $KVD_CHILDREN; do
+ key=kvd_$i
+ KVD_DEFAULTS[$key]=$(devlink_resource_get kvd "$i")
+ done
+
+ for i in $KVDL_CHILDREN; do
+ key=kvd_linear_$i
+ KVD_DEFAULTS[$key]=$(devlink_resource_get kvd linear "$i")
+ done
+}
+
+KVD_PROFILES="default scale ipv4_max"
+
+devlink_sp_resource_kvd_profile_set()
+{
+ local profile=$1
+
+ case "$profile" in
+ scale)
+ devlink_resource_size_set 64000 kvd linear
+ devlink_resource_size_set 15616 kvd linear singles
+ devlink_resource_size_set 32000 kvd linear chunks
+ devlink_resource_size_set 16384 kvd linear large_chunks
+ devlink_resource_size_set 128000 kvd hash_single
+ devlink_resource_size_set 48000 kvd hash_double
+ devlink_reload
+ ;;
+ ipv4_max)
+ devlink_resource_size_set 64000 kvd linear
+ devlink_resource_size_set 15616 kvd linear singles
+ devlink_resource_size_set 32000 kvd linear chunks
+ devlink_resource_size_set 16384 kvd linear large_chunks
+ devlink_resource_size_set 144000 kvd hash_single
+ devlink_resource_size_set 32768 kvd hash_double
+ devlink_reload
+ ;;
+ default)
+ devlink_resource_size_set 98304 kvd linear
+ devlink_resource_size_set 16384 kvd linear singles
+ devlink_resource_size_set 49152 kvd linear chunks
+ devlink_resource_size_set 32768 kvd linear large_chunks
+ devlink_resource_size_set 87040 kvd hash_single
+ devlink_resource_size_set 60416 kvd hash_double
+ devlink_reload
+ ;;
+ *)
+ check_err 1 "Unknown profile $profile"
+ esac
+}
diff --git a/tools/testing/selftests/drivers/net/mlxsw/spectrum/devlink_resources.sh b/tools/testing/selftests/drivers/net/mlxsw/spectrum/devlink_resources.sh
new file mode 100755
index 000000000000..b1fe960e398a
--- /dev/null
+++ b/tools/testing/selftests/drivers/net/mlxsw/spectrum/devlink_resources.sh
@@ -0,0 +1,117 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+NUM_NETIFS=1
+source devlink_lib_spectrum.sh
+
+setup_prepare()
+{
+ devlink_sp_read_kvd_defaults
+}
+
+cleanup()
+{
+ pre_cleanup
+ devlink_sp_size_kvd_to_default
+}
+
+trap cleanup EXIT
+
+setup_prepare
+
+profiles_test()
+{
+ local i
+
+ log_info "Running profile tests"
+
+ for i in $KVD_PROFILES; do
+ RET=0
+ devlink_sp_resource_kvd_profile_set $i
+ log_test "'$i' profile"
+ done
+
+	# Default is explicitly tested at the end to ensure it's actually applied
+ RET=0
+ devlink_sp_resource_kvd_profile_set "default"
+ log_test "'default' profile"
+}
+
+resources_min_test()
+{
+ local size
+ local i
+ local j
+
+ log_info "Running KVD-minimum tests"
+
+ for i in $KVD_CHILDREN; do
+ RET=0
+ size=$(devlink_resource_get kvd "$i" | jq '.["size_min"]')
+ devlink_resource_size_set "$size" kvd "$i"
+
+ # In case of linear, need to minimize sub-resources as well
+ if [[ "$i" == "linear" ]]; then
+ for j in $KVDL_CHILDREN; do
+ devlink_resource_size_set 0 kvd linear "$j"
+ done
+ fi
+
+ devlink_reload
+ devlink_sp_size_kvd_to_default
+ log_test "'$i' minimize [$size]"
+ done
+}
+
+resources_max_test()
+{
+ local min_size
+ local size
+ local i
+ local j
+
+ log_info "Running KVD-maximum tests"
+ for i in $KVD_CHILDREN; do
+ RET=0
+ devlink_sp_resource_minimize
+
+ # Calculate the maximum possible size for the given partition
+ size=$(devlink_resource_size_get kvd)
+ for j in $KVD_CHILDREN; do
+ if [ "$i" != "$j" ]; then
+ min_size=$(devlink_resource_get kvd "$j" | \
+ jq '.["size_min"]')
+ size=$((size - min_size))
+ fi
+ done
+
+ # Test almost maximum size
+ devlink_resource_size_set "$((size - 128))" kvd "$i"
+ devlink_reload
+ log_test "'$i' almost maximize [$((size - 128))]"
+
+ # Test above maximum size
+ devlink resource set "$DEVLINK_DEV" \
+ path "kvd/$i" size $((size + 128)) &> /dev/null
+ check_fail $? "Set kvd/$i to size $((size + 128)) should fail"
+ log_test "'$i' Overflow rejection [$((size + 128))]"
+
+ # Test maximum size
+ if [ "$i" == "hash_single" ] || [ "$i" == "hash_double" ]; then
+ echo "SKIP: Observed problem with exact max $i"
+ continue
+ fi
+
+ devlink_resource_size_set "$size" kvd "$i"
+ devlink_reload
+ log_test "'$i' maximize [$size]"
+
+ devlink_sp_size_kvd_to_default
+ done
+}
+
+profiles_test
+resources_min_test
+resources_max_test
+
+exit "$RET"
diff --git a/tools/testing/selftests/drivers/net/mlxsw/spectrum/mirror_gre_scale.sh b/tools/testing/selftests/drivers/net/mlxsw/spectrum/mirror_gre_scale.sh
new file mode 100644
index 000000000000..8d2186c7c62b
--- /dev/null
+++ b/tools/testing/selftests/drivers/net/mlxsw/spectrum/mirror_gre_scale.sh
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-2.0
+source ../mirror_gre_scale.sh
+
+mirror_gre_get_target()
+{
+ local should_fail=$1; shift
+
+ if ((! should_fail)); then
+ echo 3
+ else
+ echo 4
+ fi
+}
diff --git a/tools/testing/selftests/drivers/net/mlxsw/spectrum/resource_scale.sh b/tools/testing/selftests/drivers/net/mlxsw/spectrum/resource_scale.sh
new file mode 100755
index 000000000000..a0a80e1a69e8
--- /dev/null
+++ b/tools/testing/selftests/drivers/net/mlxsw/spectrum/resource_scale.sh
@@ -0,0 +1,55 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+NUM_NETIFS=6
+source ../../../../net/forwarding/lib.sh
+source ../../../../net/forwarding/tc_common.sh
+source devlink_lib_spectrum.sh
+
+current_test=""
+
+cleanup()
+{
+ pre_cleanup
+	if [ ! -z "$current_test" ]; then
+ ${current_test}_cleanup
+ fi
+ devlink_sp_size_kvd_to_default
+}
+
+devlink_sp_read_kvd_defaults
+trap cleanup EXIT
+
+ALL_TESTS="router tc_flower mirror_gre"
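+# Each test is sourced from its <test>_scale.sh file and driven through its
+# <test>_setup_prepare, <test>_test and <test>_cleanup hooks. A subset of the
+# tests can be selected through the TESTS environment variable, e.g.
+# TESTS="router" runs only the router scale test.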
+for current_test in ${TESTS:-$ALL_TESTS}; do
+ source ${current_test}_scale.sh
+
+ num_netifs_var=${current_test^^}_NUM_NETIFS
+ num_netifs=${!num_netifs_var:-$NUM_NETIFS}
+
+ for profile in $KVD_PROFILES; do
+ RET=0
+ devlink_sp_resource_kvd_profile_set $profile
+ if [[ $RET -gt 0 ]]; then
+ log_test "'$current_test' [$profile] setting"
+ continue
+ fi
+
+ for should_fail in 0 1; do
+ RET=0
+ target=$(${current_test}_get_target "$should_fail")
+ ${current_test}_setup_prepare
+ setup_wait $num_netifs
+ ${current_test}_test "$target" "$should_fail"
+ ${current_test}_cleanup
+ if [[ "$should_fail" -eq 0 ]]; then
+ log_test "'$current_test' [$profile] $target"
+ else
+ log_test "'$current_test' [$profile] overflow $target"
+ fi
+ done
+ done
+done
+current_test=""
+
+exit "$RET"
diff --git a/tools/testing/selftests/drivers/net/mlxsw/spectrum/router_scale.sh b/tools/testing/selftests/drivers/net/mlxsw/spectrum/router_scale.sh
new file mode 100644
index 000000000000..21c4697d5bab
--- /dev/null
+++ b/tools/testing/selftests/drivers/net/mlxsw/spectrum/router_scale.sh
@@ -0,0 +1,18 @@
+# SPDX-License-Identifier: GPL-2.0
+source ../router_scale.sh
+
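+# The target for the non-failing run is 85% of the hash_single KVD partition;
+# the overflow target is one route more than the partition size. For example,
+# with the "default" profile from devlink_lib_spectrum.sh (hash_single set to
+# 87040), the targets are 73984 and 87041 routes, respectively.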
+router_get_target()
+{
+ local should_fail=$1
+ local target
+
+ target=$(devlink_resource_size_get kvd hash_single)
+
+ if [[ $should_fail -eq 0 ]]; then
+ target=$((target * 85 / 100))
+ else
+ target=$((target + 1))
+ fi
+
+ echo $target
+}
diff --git a/tools/testing/selftests/drivers/net/mlxsw/spectrum/tc_flower_scale.sh b/tools/testing/selftests/drivers/net/mlxsw/spectrum/tc_flower_scale.sh
new file mode 100644
index 000000000000..f9bfd8937765
--- /dev/null
+++ b/tools/testing/selftests/drivers/net/mlxsw/spectrum/tc_flower_scale.sh
@@ -0,0 +1,19 @@
+# SPDX-License-Identifier: GPL-2.0
+source ../tc_flower_scale.sh
+
+tc_flower_get_target()
+{
+ local should_fail=$1; shift
+
+ # 6144 (6x1024) is the theoretical maximum.
+ # One bank of 512 rules is taken by the 18-byte MC router rule.
+ # One rule is the ACL catch-all.
+ # 6144 - 512 - 1 = 5631
+ local target=5631
+
+ if ((! should_fail)); then
+ echo $target
+ else
+ echo $((target + 1))
+ fi
+}
diff --git a/tools/testing/selftests/drivers/net/mlxsw/tc_flower_scale.sh b/tools/testing/selftests/drivers/net/mlxsw/tc_flower_scale.sh
new file mode 100644
index 000000000000..a6d733d2a4b4
--- /dev/null
+++ b/tools/testing/selftests/drivers/net/mlxsw/tc_flower_scale.sh
@@ -0,0 +1,134 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+# Test for resource limit of offloaded flower rules. The test adds a given
+# number of flower matches for different IPv6 addresses, then generates traffic,
+# and ensures each was hit exactly once. This file contains functions to set up
+# a testing topology and run the test, and is meant to be sourced from a test
+# script that calls the testing routine with a given number of rules.
+
+TC_FLOWER_NUM_NETIFS=2
+
+tc_flower_h1_create()
+{
+ simple_if_init $h1
+ tc qdisc add dev $h1 clsact
+}
+
+tc_flower_h1_destroy()
+{
+ tc qdisc del dev $h1 clsact
+ simple_if_fini $h1
+}
+
+tc_flower_h2_create()
+{
+ simple_if_init $h2
+ tc qdisc add dev $h2 clsact
+}
+
+tc_flower_h2_destroy()
+{
+ tc qdisc del dev $h2 clsact
+ simple_if_fini $h2
+}
+
+tc_flower_setup_prepare()
+{
+ h1=${NETIFS[p1]}
+ h2=${NETIFS[p2]}
+
+ vrf_prepare
+
+ tc_flower_h1_create
+ tc_flower_h2_create
+}
+
+tc_flower_cleanup()
+{
+ pre_cleanup
+
+ tc_flower_h2_destroy
+ tc_flower_h1_destroy
+
+ vrf_cleanup
+
+ if [[ -v TC_FLOWER_BATCH_FILE ]]; then
+ rm -f $TC_FLOWER_BATCH_FILE
+ fi
+}
+
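+# Print the IPv6 address matched by rule number $num, e.g. "tc_flower_addr 300"
+# yields 2001:db8:1::12c.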
+tc_flower_addr()
+{
+ local num=$1; shift
+
+ printf "2001:db8:1::%x" $num
+}
+
+tc_flower_rules_create()
+{
+ local count=$1; shift
+ local should_fail=$1; shift
+
+ TC_FLOWER_BATCH_FILE="$(mktemp)"
+
+ for ((i = 0; i < count; ++i)); do
+ cat >> $TC_FLOWER_BATCH_FILE <<-EOF
+ filter add dev $h2 ingress \
+ prot ipv6 \
+ pref 1000 \
+ flower $tcflags dst_ip $(tc_flower_addr $i) \
+ action drop
+ EOF
+ done
+
+ tc -b $TC_FLOWER_BATCH_FILE
+ check_err_fail $should_fail $? "Rule insertion"
+}
+
+__tc_flower_test()
+{
+ local count=$1; shift
+ local should_fail=$1; shift
+ local last=$((count - 1))
+
+ tc_flower_rules_create $count $should_fail
+
+ for ((i = 0; i < count; ++i)); do
+ $MZ $h1 -q -c 1 -t ip -p 20 -b bc -6 \
+ -A 2001:db8:2::1 \
+ -B $(tc_flower_addr $i)
+ done
+
+ MISMATCHES=$(
+ tc -j -s filter show dev $h2 ingress |
+ jq -r '[ .[] | select(.kind == "flower") | .options |
+ values as $rule | .actions[].stats.packets |
+ select(. != 1) | "\(.) on \($rule.keys.dst_ip)" ] |
+ join(", ")'
+ )
+
+ test -z "$MISMATCHES"
+ check_err $? "Expected to capture 1 packet for each IP, but got $MISMATCHES"
+}
+
+tc_flower_test()
+{
+ local count=$1; shift
+ local should_fail=$1; shift
+
+	# We use the lower 16 bits of the IPv6 address for the match. There are
+	# also only 16 bits of rule priority space.
+ if ((count > 65536)); then
+ check_err 1 "Invalid count of $count. At most 65536 rules supported"
+ return
+ fi
+
+ if ! tc_offload_check $TC_FLOWER_NUM_NETIFS; then
+ check_err 1 "Could not test offloaded functionality"
+ return
+ fi
+
+ tcflags="skip_sw"
+ __tc_flower_test $count $should_fail
+}
diff --git a/tools/testing/selftests/net/.gitignore b/tools/testing/selftests/net/.gitignore
index 1a0ac3a29ec5..78b24cf76f40 100644
--- a/tools/testing/selftests/net/.gitignore
+++ b/tools/testing/selftests/net/.gitignore
@@ -13,3 +13,4 @@ udpgso
udpgso_bench_rx
udpgso_bench_tx
tcp_inq
+tls
diff --git a/tools/testing/selftests/net/Makefile b/tools/testing/selftests/net/Makefile
index 663e11e85727..9cca68e440a0 100644
--- a/tools/testing/selftests/net/Makefile
+++ b/tools/testing/selftests/net/Makefile
@@ -13,7 +13,7 @@ TEST_GEN_FILES += psock_fanout psock_tpacket msg_zerocopy
TEST_GEN_FILES += tcp_mmap tcp_inq psock_snd
TEST_GEN_FILES += udpgso udpgso_bench_tx udpgso_bench_rx
TEST_GEN_PROGS = reuseport_bpf reuseport_bpf_cpu reuseport_bpf_numa
-TEST_GEN_PROGS += reuseport_dualstack reuseaddr_conflict
+TEST_GEN_PROGS += reuseport_dualstack reuseaddr_conflict tls
include ../lib.mk
diff --git a/tools/testing/selftests/net/forwarding/README b/tools/testing/selftests/net/forwarding/README
index 4a0964c42860..b8a2af8fcfb7 100644
--- a/tools/testing/selftests/net/forwarding/README
+++ b/tools/testing/selftests/net/forwarding/README
@@ -46,6 +46,8 @@ Guidelines for Writing Tests
o Where possible, reuse an existing topology for different tests instead
of recreating the same topology.
+o Tests that use anything but the most trivial topologies should include
+  an ASCII art diagram showing the topology.
o Where possible, IPv6 and IPv4 addresses shall conform to RFC 3849 and
RFC 5737, respectively.
o Where possible, tests shall be written so that they can be reused by
diff --git a/tools/testing/selftests/net/forwarding/bridge_port_isolation.sh b/tools/testing/selftests/net/forwarding/bridge_port_isolation.sh
new file mode 100755
index 000000000000..a43b4645c4de
--- /dev/null
+++ b/tools/testing/selftests/net/forwarding/bridge_port_isolation.sh
@@ -0,0 +1,151 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+ALL_TESTS="ping_ipv4 ping_ipv6 flooding"
+NUM_NETIFS=6
+CHECK_TC="yes"
+source lib.sh
+
+h1_create()
+{
+ simple_if_init $h1 192.0.2.1/24 2001:db8:1::1/64
+}
+
+h1_destroy()
+{
+ simple_if_fini $h1 192.0.2.1/24 2001:db8:1::1/64
+}
+
+h2_create()
+{
+ simple_if_init $h2 192.0.2.2/24 2001:db8:1::2/64
+}
+
+h2_destroy()
+{
+ simple_if_fini $h2 192.0.2.2/24 2001:db8:1::2/64
+}
+
+h3_create()
+{
+ simple_if_init $h3 192.0.2.3/24 2001:db8:1::3/64
+}
+
+h3_destroy()
+{
+ simple_if_fini $h3 192.0.2.3/24 2001:db8:1::3/64
+}
+
+switch_create()
+{
+ ip link add dev br0 type bridge
+
+ ip link set dev $swp1 master br0
+ ip link set dev $swp2 master br0
+ ip link set dev $swp3 master br0
+
+ ip link set dev $swp1 type bridge_slave isolated on
+ check_err $? "Can't set isolation on port $swp1"
+ ip link set dev $swp2 type bridge_slave isolated on
+ check_err $? "Can't set isolation on port $swp2"
+ ip link set dev $swp3 type bridge_slave isolated off
+ check_err $? "Can't disable isolation on port $swp3"
+
+ ip link set dev br0 up
+ ip link set dev $swp1 up
+ ip link set dev $swp2 up
+ ip link set dev $swp3 up
+}
+
+switch_destroy()
+{
+ ip link set dev $swp3 down
+ ip link set dev $swp2 down
+ ip link set dev $swp1 down
+
+ ip link del dev br0
+}
+
+setup_prepare()
+{
+ h1=${NETIFS[p1]}
+ swp1=${NETIFS[p2]}
+
+ swp2=${NETIFS[p3]}
+ h2=${NETIFS[p4]}
+
+ swp3=${NETIFS[p5]}
+ h3=${NETIFS[p6]}
+
+ vrf_prepare
+
+ h1_create
+ h2_create
+ h3_create
+
+ switch_create
+}
+
+cleanup()
+{
+ pre_cleanup
+
+ switch_destroy
+
+ h3_destroy
+ h2_destroy
+ h1_destroy
+
+ vrf_cleanup
+}
+
+ping_ipv4()
+{
+ RET=0
+ ping_do $h1 192.0.2.2
+ check_fail $? "Ping worked when it should not have"
+
+ RET=0
+ ping_do $h3 192.0.2.2
+ check_err $? "Ping didn't work when it should have"
+
+ log_test "Isolated port ping"
+}
+
+ping_ipv6()
+{
+ RET=0
+ ping6_do $h1 2001:db8:1::2
+ check_fail $? "Ping6 worked when it should not have"
+
+ RET=0
+ ping6_do $h3 2001:db8:1::2
+ check_err $? "Ping6 didn't work when it should have"
+
+ log_test "Isolated port ping6"
+}
+
+flooding()
+{
+ local mac=de:ad:be:ef:13:37
+ local ip=192.0.2.100
+
+ RET=0
+ flood_test_do false $mac $ip $h1 $h2
+ check_err $? "Packet was flooded when it should not have been"
+
+ RET=0
+ flood_test_do true $mac $ip $h3 $h2
+ check_err $? "Packet was not flooded when it should have been"
+
+ log_test "Isolated port flooding"
+}
+
+trap cleanup EXIT
+
+setup_prepare
+setup_wait
+
+tests_run
+
+exit $EXIT_STATUS
diff --git a/tools/testing/selftests/net/forwarding/devlink_lib.sh b/tools/testing/selftests/net/forwarding/devlink_lib.sh
new file mode 100644
index 000000000000..5ab1e5f43022
--- /dev/null
+++ b/tools/testing/selftests/net/forwarding/devlink_lib.sh
@@ -0,0 +1,108 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+##############################################################################
+# Source library
+
+relative_path="${BASH_SOURCE%/*}"
+if [[ "$relative_path" == "${BASH_SOURCE}" ]]; then
+ relative_path="."
+fi
+
+source "$relative_path/lib.sh"
+
+##############################################################################
+# Defines
+
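+# Derive the devlink handle (e.g. "pci/0000:03:00.0") of the device behind the
+# first test netdev: take its "devlink port show" line and strip the trailing
+# "/<port index>" from the first field.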
+DEVLINK_DEV=$(devlink port show | grep "${NETIFS[p1]}" | \
+ grep -v "${NETIFS[p1]}[0-9]" | cut -d" " -f1 | \
+ rev | cut -d"/" -f2- | rev)
+if [ -z "$DEVLINK_DEV" ]; then
+ echo "SKIP: ${NETIFS[p1]} has no devlink device registered for it"
+ exit 1
+fi
+if [[ "$(echo $DEVLINK_DEV | grep -c pci)" -eq 0 ]]; then
+ echo "SKIP: devlink device's bus is not PCI"
+ exit 1
+fi
+
+DEVLINK_VIDDID=$(lspci -s $(echo $DEVLINK_DEV | cut -d"/" -f2) \
+ -n | cut -d" " -f3)
+
+##############################################################################
+# Sanity checks
+
+devlink -j resource show "$DEVLINK_DEV" &> /dev/null
+if [ $? -ne 0 ]; then
+ echo "SKIP: iproute2 too old, missing devlink resource support"
+ exit 1
+fi
+
+##############################################################################
+# Devlink helpers
+
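+# Join resource names into a devlink resource path, e.g.
+# "devlink_resource_names_to_path kvd linear singles" yields
+# "kvd/linear/singles".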
+devlink_resource_names_to_path()
+{
+ local resource
+ local path=""
+
+ for resource in "${@}"; do
+ if [ "$path" == "" ]; then
+ path="$resource"
+ else
+ path="${path}/$resource"
+ fi
+ done
+
+ echo "$path"
+}
+
+devlink_resource_get()
+{
+ local name=$1
+ local resource_name=.[][\"$DEVLINK_DEV\"]
+
+ resource_name="$resource_name | .[] | select (.name == \"$name\")"
+
+ shift
+ for resource in "${@}"; do
+ resource_name="${resource_name} | .[\"resources\"][] | \
+ select (.name == \"$resource\")"
+ done
+
+ devlink -j resource show "$DEVLINK_DEV" | jq "$resource_name"
+}
+
+devlink_resource_size_get()
+{
+ local size=$(devlink_resource_get "$@" | jq '.["size_new"]')
+
+ if [ "$size" == "null" ]; then
+ devlink_resource_get "$@" | jq '.["size"]'
+ else
+ echo "$size"
+ fi
+}
+
+devlink_resource_size_set()
+{
+ local new_size=$1
+ local path
+
+ shift
+ path=$(devlink_resource_names_to_path "$@")
+ devlink resource set "$DEVLINK_DEV" path "$path" size "$new_size"
+ check_err $? "Failed setting path $path to size $size"
+}
+
+devlink_reload()
+{
+ local still_pending
+
+ devlink dev reload "$DEVLINK_DEV" &> /dev/null
+ check_err $? "Failed reload"
+
+ still_pending=$(devlink resource show "$DEVLINK_DEV" | \
+ grep -c "size_new")
+ check_err $still_pending "Failed reload - There are still unset sizes"
+}
diff --git a/tools/testing/selftests/net/forwarding/gre_multipath.sh b/tools/testing/selftests/net/forwarding/gre_multipath.sh
new file mode 100755
index 000000000000..3b1e047f7617
--- /dev/null
+++ b/tools/testing/selftests/net/forwarding/gre_multipath.sh
@@ -0,0 +1,253 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+# Test traffic distribution when a wECMP route forwards traffic to two GRE
+# tunnels.
+#
+# +-------------------------+
+# | H1 |
+# | $h1 + |
+# | 192.0.2.1/28 | |
+# +-------------------|-----+
+# |
+# +-------------------|------------------------+
+# | SW1 | |
+# | $ol1 + |
+# | 192.0.2.2/28 |
+# | |
+# | + g1a (gre) + g1b (gre) |
+# | loc=192.0.2.65 loc=192.0.2.81 |
+# | rem=192.0.2.66 --. rem=192.0.2.82 --. |
+# | tos=inherit | tos=inherit | |
+# | .------------------' | |
+# | | .------------------' |
+# | v v |
+# | + $ul1.111 (vlan) + $ul1.222 (vlan) |
+# | | 192.0.2.129/28 | 192.0.2.145/28 |
+# | \ / |
+# | \________________/ |
+# | | |
+# | + $ul1 |
+# +------------|-------------------------------+
+# |
+# +------------|-------------------------------+
+# | SW2 + $ul2 |
+# | _______|________ |
+# | / \ |
+# | / \ |
+# | + $ul2.111 (vlan) + $ul2.222 (vlan) |
+# | ^ 192.0.2.130/28 ^ 192.0.2.146/28 |
+# | | | |
+# | | '------------------. |
+# | '------------------. | |
+# | + g2a (gre) | + g2b (gre) | |
+# | loc=192.0.2.66 | loc=192.0.2.82 | |
+# | rem=192.0.2.65 --' rem=192.0.2.81 --' |
+# | tos=inherit tos=inherit |
+# | |
+# | $ol2 + |
+# | 192.0.2.17/28 | |
+# +-------------------|------------------------+
+# |
+# +-------------------|-----+
+# | H2 | |
+# | $h2 + |
+# | 192.0.2.18/28 |
+# +-------------------------+
+
+ALL_TESTS="
+ ping_ipv4
+ multipath_ipv4
+"
+
+NUM_NETIFS=6
+source lib.sh
+
+h1_create()
+{
+ simple_if_init $h1 192.0.2.1/28 2001:db8:1::1/64
+ ip route add vrf v$h1 192.0.2.16/28 via 192.0.2.2
+}
+
+h1_destroy()
+{
+ ip route del vrf v$h1 192.0.2.16/28 via 192.0.2.2
+	simple_if_fini $h1 192.0.2.1/28 2001:db8:1::1/64
+}
+
+sw1_create()
+{
+ simple_if_init $ol1 192.0.2.2/28
+ __simple_if_init $ul1 v$ol1
+ vlan_create $ul1 111 v$ol1 192.0.2.129/28
+ vlan_create $ul1 222 v$ol1 192.0.2.145/28
+
+ tunnel_create g1a gre 192.0.2.65 192.0.2.66 tos inherit dev v$ol1
+ __simple_if_init g1a v$ol1 192.0.2.65/32
+ ip route add vrf v$ol1 192.0.2.66/32 via 192.0.2.130
+
+ tunnel_create g1b gre 192.0.2.81 192.0.2.82 tos inherit dev v$ol1
+ __simple_if_init g1b v$ol1 192.0.2.81/32
+ ip route add vrf v$ol1 192.0.2.82/32 via 192.0.2.146
+
+ ip route add vrf v$ol1 192.0.2.16/28 \
+ nexthop dev g1a \
+ nexthop dev g1b
+
+ tc qdisc add dev $ul1 clsact
+ tc filter add dev $ul1 egress pref 111 prot 802.1q \
+ flower vlan_id 111 action pass
+ tc filter add dev $ul1 egress pref 222 prot 802.1q \
+ flower vlan_id 222 action pass
+}
+
+sw1_destroy()
+{
+ tc qdisc del dev $ul1 clsact
+
+ ip route del vrf v$ol1 192.0.2.16/28
+
+ ip route del vrf v$ol1 192.0.2.82/32 via 192.0.2.146
+ __simple_if_fini g1b 192.0.2.81/32
+ tunnel_destroy g1b
+
+ ip route del vrf v$ol1 192.0.2.66/32 via 192.0.2.130
+ __simple_if_fini g1a 192.0.2.65/32
+ tunnel_destroy g1a
+
+ vlan_destroy $ul1 222
+ vlan_destroy $ul1 111
+ __simple_if_fini $ul1
+ simple_if_fini $ol1 192.0.2.2/28
+}
+
+sw2_create()
+{
+ simple_if_init $ol2 192.0.2.17/28
+ __simple_if_init $ul2 v$ol2
+ vlan_create $ul2 111 v$ol2 192.0.2.130/28
+ vlan_create $ul2 222 v$ol2 192.0.2.146/28
+
+ tunnel_create g2a gre 192.0.2.66 192.0.2.65 tos inherit dev v$ol2
+ __simple_if_init g2a v$ol2 192.0.2.66/32
+ ip route add vrf v$ol2 192.0.2.65/32 via 192.0.2.129
+
+ tunnel_create g2b gre 192.0.2.82 192.0.2.81 tos inherit dev v$ol2
+ __simple_if_init g2b v$ol2 192.0.2.82/32
+ ip route add vrf v$ol2 192.0.2.81/32 via 192.0.2.145
+
+ ip route add vrf v$ol2 192.0.2.0/28 \
+ nexthop dev g2a \
+ nexthop dev g2b
+}
+
+sw2_destroy()
+{
+ ip route del vrf v$ol2 192.0.2.0/28
+
+ ip route del vrf v$ol2 192.0.2.81/32 via 192.0.2.145
+ __simple_if_fini g2b 192.0.2.82/32
+ tunnel_destroy g2b
+
+ ip route del vrf v$ol2 192.0.2.65/32 via 192.0.2.129
+ __simple_if_fini g2a 192.0.2.66/32
+ tunnel_destroy g2a
+
+ vlan_destroy $ul2 222
+ vlan_destroy $ul2 111
+ __simple_if_fini $ul2
+ simple_if_fini $ol2 192.0.2.17/28
+}
+
+h2_create()
+{
+ simple_if_init $h2 192.0.2.18/28
+ ip route add vrf v$h2 192.0.2.0/28 via 192.0.2.17
+}
+
+h2_destroy()
+{
+ ip route del vrf v$h2 192.0.2.0/28 via 192.0.2.17
+ simple_if_fini $h2 192.0.2.18/28
+}
+
+setup_prepare()
+{
+ h1=${NETIFS[p1]}
+ ol1=${NETIFS[p2]}
+
+ ul1=${NETIFS[p3]}
+ ul2=${NETIFS[p4]}
+
+ ol2=${NETIFS[p5]}
+ h2=${NETIFS[p6]}
+
+ vrf_prepare
+ h1_create
+ sw1_create
+ sw2_create
+ h2_create
+}
+
+cleanup()
+{
+ pre_cleanup
+
+ h2_destroy
+ sw2_destroy
+ sw1_destroy
+ h1_destroy
+ vrf_cleanup
+}
+
+multipath4_test()
+{
+ local what=$1; shift
+ local weight1=$1; shift
+ local weight2=$1; shift
+
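+	# Use layer-4 multipath hashing (policy 1), so that varying the UDP
+	# destination port below spreads the flows across both tunnel nexthops.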
+ sysctl_set net.ipv4.fib_multipath_hash_policy 1
+ ip route replace vrf v$ol1 192.0.2.16/28 \
+ nexthop dev g1a weight $weight1 \
+ nexthop dev g1b weight $weight2
+
+ local t0_111=$(tc_rule_stats_get $ul1 111 egress)
+ local t0_222=$(tc_rule_stats_get $ul1 222 egress)
+
+ ip vrf exec v$h1 \
+ $MZ $h1 -q -p 64 -A 192.0.2.1 -B 192.0.2.18 \
+ -d 1msec -t udp "sp=1024,dp=0-32768"
+
+ local t1_111=$(tc_rule_stats_get $ul1 111 egress)
+ local t1_222=$(tc_rule_stats_get $ul1 222 egress)
+
+ local d111=$((t1_111 - t0_111))
+ local d222=$((t1_222 - t0_222))
+ multipath_eval "$what" $weight1 $weight2 $d111 $d222
+
+ ip route replace vrf v$ol1 192.0.2.16/28 \
+ nexthop dev g1a \
+ nexthop dev g1b
+ sysctl_restore net.ipv4.fib_multipath_hash_policy
+}
+
+ping_ipv4()
+{
+ ping_test $h1 192.0.2.18
+}
+
+multipath_ipv4()
+{
+ log_info "Running IPv4 multipath tests"
+ multipath4_test "ECMP" 1 1
+ multipath4_test "Weighted MP 2:1" 2 1
+ multipath4_test "Weighted MP 11:45" 11 45
+}
+
+trap cleanup EXIT
+
+setup_prepare
+setup_wait
+tests_run
+
+exit $EXIT_STATUS
diff --git a/tools/testing/selftests/net/forwarding/lib.sh b/tools/testing/selftests/net/forwarding/lib.sh
index 7b18a53aa556..ca53b539aa2d 100644
--- a/tools/testing/selftests/net/forwarding/lib.sh
+++ b/tools/testing/selftests/net/forwarding/lib.sh
@@ -8,14 +8,21 @@
PING=${PING:=ping}
PING6=${PING6:=ping6}
MZ=${MZ:=mausezahn}
+ARPING=${ARPING:=arping}
+TEAMD=${TEAMD:=teamd}
WAIT_TIME=${WAIT_TIME:=5}
PAUSE_ON_FAIL=${PAUSE_ON_FAIL:=no}
PAUSE_ON_CLEANUP=${PAUSE_ON_CLEANUP:=no}
NETIF_TYPE=${NETIF_TYPE:=veth}
NETIF_CREATE=${NETIF_CREATE:=yes}
-if [[ -f forwarding.config ]]; then
- source forwarding.config
+relative_path="${BASH_SOURCE%/*}"
+if [[ "$relative_path" == "${BASH_SOURCE}" ]]; then
+ relative_path="."
+fi
+
+if [[ -f $relative_path/forwarding.config ]]; then
+ source "$relative_path/forwarding.config"
fi
##############################################################################
@@ -28,7 +35,10 @@ check_tc_version()
echo "SKIP: iproute2 too old; tc is missing JSON support"
exit 1
fi
+}
+check_tc_shblock_support()
+{
tc filter help 2>&1 | grep block &> /dev/null
if [[ $? -ne 0 ]]; then
echo "SKIP: iproute2 too old; tc is missing shared block support"
@@ -36,6 +46,15 @@ check_tc_version()
fi
}
+check_tc_chain_support()
+{
+	tc help 2>&1 | grep chain &> /dev/null
+ if [[ $? -ne 0 ]]; then
+ echo "SKIP: iproute2 too old; tc is missing chain support"
+ exit 1
+ fi
+}
+
if [[ "$(id -u)" -ne 0 ]]; then
echo "SKIP: need root privileges"
exit 0
@@ -45,15 +64,18 @@ if [[ "$CHECK_TC" = "yes" ]]; then
check_tc_version
fi
-if [[ ! -x "$(command -v jq)" ]]; then
- echo "SKIP: jq not installed"
- exit 1
-fi
+require_command()
+{
+ local cmd=$1; shift
-if [[ ! -x "$(command -v $MZ)" ]]; then
- echo "SKIP: $MZ not installed"
- exit 1
-fi
+ if [[ ! -x "$(command -v "$cmd")" ]]; then
+ echo "SKIP: $cmd not installed"
+ exit 1
+ fi
+}
+
+require_command jq
+require_command $MZ
if [[ ! -v NUM_NETIFS ]]; then
echo "SKIP: importer does not define \"NUM_NETIFS\""
@@ -151,6 +173,19 @@ check_fail()
fi
}
+check_err_fail()
+{
+ local should_fail=$1; shift
+ local err=$1; shift
+ local what=$1; shift
+
+ if ((should_fail)); then
+ check_fail $err "$what succeeded, but should have failed"
+ else
+ check_err $err "$what failed"
+ fi
+}
+
log_test()
{
local test_name=$1
@@ -185,24 +220,54 @@ log_info()
echo "INFO: $msg"
}
+setup_wait_dev()
+{
+ local dev=$1; shift
+
+ while true; do
+ ip link show dev $dev up \
+ | grep 'state UP' &> /dev/null
+ if [[ $? -ne 0 ]]; then
+ sleep 1
+ else
+ break
+ fi
+ done
+}
+
setup_wait()
{
- for i in $(eval echo {1..$NUM_NETIFS}); do
- while true; do
- ip link show dev ${NETIFS[p$i]} up \
- | grep 'state UP' &> /dev/null
- if [[ $? -ne 0 ]]; then
- sleep 1
- else
- break
- fi
- done
+ local num_netifs=${1:-$NUM_NETIFS}
+
+ for ((i = 1; i <= num_netifs; ++i)); do
+ setup_wait_dev ${NETIFS[p$i]}
done
# Make sure links are ready.
sleep $WAIT_TIME
}
+lldpad_app_wait_set()
+{
+ local dev=$1; shift
+
+ while lldptool -t -i $dev -V APP -c app | grep -q pending; do
+ echo "$dev: waiting for lldpad to push pending APP updates"
+ sleep 5
+ done
+}
+
+lldpad_app_wait_del()
+{
+ # Give lldpad a chance to push down the changes. If the device is downed
+ # too soon, the updates will be left pending. However, they will have
+	# been struck off lldpad's DB already, so we won't be able to tell
+	# they are pending. On the next test iteration this would then cause
+ # weirdness as newly-added APP rules conflict with the old ones,
+ # sometimes getting stuck in an "unknown" state.
+ sleep 5
+}
+
pre_cleanup()
{
if [ "${PAUSE_ON_CLEANUP}" = "yes" ]; then
@@ -287,6 +352,29 @@ __addr_add_del()
done
}
+__simple_if_init()
+{
+ local if_name=$1; shift
+ local vrf_name=$1; shift
+ local addrs=("${@}")
+
+ ip link set dev $if_name master $vrf_name
+ ip link set dev $if_name up
+
+ __addr_add_del $if_name add "${addrs[@]}"
+}
+
+__simple_if_fini()
+{
+ local if_name=$1; shift
+ local addrs=("${@}")
+
+ __addr_add_del $if_name del "${addrs[@]}"
+
+ ip link set dev $if_name down
+ ip link set dev $if_name nomaster
+}
+
simple_if_init()
{
local if_name=$1
@@ -298,11 +386,8 @@ simple_if_init()
array=("${@}")
vrf_create $vrf_name
- ip link set dev $if_name master $vrf_name
ip link set dev $vrf_name up
- ip link set dev $if_name up
-
- __addr_add_del $if_name add "${array[@]}"
+ __simple_if_init $if_name $vrf_name "${array[@]}"
}
simple_if_fini()
@@ -315,9 +400,7 @@ simple_if_fini()
vrf_name=v$if_name
array=("${@}")
- __addr_add_del $if_name del "${array[@]}"
-
- ip link set dev $if_name down
+ __simple_if_fini $if_name "${array[@]}"
vrf_destroy $vrf_name
}
@@ -365,6 +448,28 @@ vlan_destroy()
ip link del dev $name
}
+team_create()
+{
+ local if_name=$1; shift
+ local mode=$1; shift
+
+ require_command $TEAMD
+ $TEAMD -t $if_name -d -c '{"runner": {"name": "'$mode'"}}'
+ for slave in "$@"; do
+ ip link set dev $slave down
+ ip link set dev $slave master $if_name
+ ip link set dev $slave up
+ done
+ ip link set dev $if_name up
+}
+
+team_destroy()
+{
+ local if_name=$1; shift
+
+ $TEAMD -t $if_name -k
+}
+
master_name_get()
{
local if_name=$1
@@ -383,9 +488,10 @@ tc_rule_stats_get()
{
local dev=$1; shift
local pref=$1; shift
+ local dir=$1; shift
- tc -j -s filter show dev $dev ingress pref $pref |
- jq '.[1].options.actions[].stats.packets'
+ tc -j -s filter show dev $dev ${dir:-ingress} pref $pref \
+ | jq '.[1].options.actions[].stats.packets'
}
mac_get()
@@ -437,7 +543,9 @@ forwarding_restore()
tc_offload_check()
{
- for i in $(eval echo {1..$NUM_NETIFS}); do
+ local num_netifs=${1:-$NUM_NETIFS}
+
+ for ((i = 1; i <= num_netifs; ++i)); do
ethtool -k ${NETIFS[p$i]} \
| grep "hw-tc-offload: on" &> /dev/null
if [[ $? -ne 0 ]]; then
@@ -453,9 +561,15 @@ trap_install()
local dev=$1; shift
local direction=$1; shift
- # For slow-path testing, we need to install a trap to get to
- # slow path the packets that would otherwise be switched in HW.
- tc filter add dev $dev $direction pref 1 flower skip_sw action trap
+ # Some devices may not support or need in-hardware trapping of traffic
+ # (e.g. the veth pairs that this library creates for non-existent
+ # loopbacks). Use continue instead, so that there is a filter in there
+ # (some tests check counters), and so that other filters are still
+ # processed.
+ tc filter add dev $dev $direction pref 1 \
+ flower skip_sw action trap 2>/dev/null \
+ || tc filter add dev $dev $direction pref 1 \
+ flower action continue
}
trap_uninstall()
@@ -463,11 +577,13 @@ trap_uninstall()
local dev=$1; shift
local direction=$1; shift
- tc filter del dev $dev $direction pref 1 flower skip_sw
+ tc filter del dev $dev $direction pref 1 flower
}
slow_path_trap_install()
{
+ # For slow-path testing, we need to install a trap to get to
+ # slow path the packets that would otherwise be switched in HW.
if [ "${tcflags/skip_hw}" != "$tcflags" ]; then
trap_install "$@"
fi
@@ -537,6 +653,48 @@ vlan_capture_uninstall()
__vlan_capture_add_del del 100 "$@"
}
+__dscp_capture_add_del()
+{
+ local add_del=$1; shift
+ local dev=$1; shift
+ local base=$1; shift
+ local dscp;
+
+ for prio in {0..7}; do
+ dscp=$((base + prio))
+ __icmp_capture_add_del $add_del $((dscp + 100)) "" $dev \
+ "skip_hw ip_tos $((dscp << 2))"
+ done
+}
+
+dscp_capture_install()
+{
+ local dev=$1; shift
+ local base=$1; shift
+
+ __dscp_capture_add_del add $dev $base
+}
+
+dscp_capture_uninstall()
+{
+ local dev=$1; shift
+ local base=$1; shift
+
+ __dscp_capture_add_del del $dev $base
+}
+
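+# Fetch the per-DSCP capture counters installed by dscp_capture_install for
+# the given device and DSCP base. The output is a series of "[dscp]=count"
+# pairs meant to be eval'ed into an associative array, e.g.:
+#
+#   eval "declare -A stats=($(dscp_fetch_stats $dev 10))"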
+dscp_fetch_stats()
+{
+ local dev=$1; shift
+ local base=$1; shift
+
+ for prio in {0..7}; do
+ local dscp=$((base + prio))
+ local t=$(tc_rule_stats_get $dev $((dscp + 100)))
+ echo "[$dscp]=$t "
+ done
+}
+
matchall_sink_create()
{
local dev=$1; shift
@@ -557,33 +715,86 @@ tests_run()
done
}
+multipath_eval()
+{
+ local desc="$1"
+ local weight_rp12=$2
+ local weight_rp13=$3
+ local packets_rp12=$4
+ local packets_rp13=$5
+ local weights_ratio packets_ratio diff
+
+ RET=0
+
+ if [[ "$weight_rp12" -gt "$weight_rp13" ]]; then
+ weights_ratio=$(echo "scale=2; $weight_rp12 / $weight_rp13" \
+ | bc -l)
+ else
+ weights_ratio=$(echo "scale=2; $weight_rp13 / $weight_rp12" \
+ | bc -l)
+ fi
+
+ if [[ "$packets_rp12" -eq "0" || "$packets_rp13" -eq "0" ]]; then
+ check_err 1 "Packet difference is 0"
+ log_test "Multipath"
+ log_info "Expected ratio $weights_ratio"
+ return
+ fi
+
+ if [[ "$weight_rp12" -gt "$weight_rp13" ]]; then
+ packets_ratio=$(echo "scale=2; $packets_rp12 / $packets_rp13" \
+ | bc -l)
+ else
+ packets_ratio=$(echo "scale=2; $packets_rp13 / $packets_rp12" \
+ | bc -l)
+ fi
+
+ diff=$(echo $weights_ratio - $packets_ratio | bc -l)
+ diff=${diff#-}
+
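+	# For example, with weights 2:1 and 1300 / 700 captured packets, the
+	# ratios are 2.00 and 1.85; the relative error, 0.15 / 2.00 = 0.075,
+	# is within the 15% tolerance checked below.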
+ test "$(echo "$diff / $weights_ratio > 0.15" | bc -l)" -eq 0
+ check_err $? "Too large discrepancy between expected and measured ratios"
+ log_test "$desc"
+ log_info "Expected ratio $weights_ratio Measured ratio $packets_ratio"
+}
+
##############################################################################
# Tests
-ping_test()
+ping_do()
{
local if_name=$1
local dip=$2
local vrf_name
- RET=0
-
vrf_name=$(master_name_get $if_name)
ip vrf exec $vrf_name $PING $dip -c 10 -i 0.1 -w 2 &> /dev/null
+}
+
+ping_test()
+{
+ RET=0
+
+ ping_do $1 $2
check_err $?
log_test "ping"
}
-ping6_test()
+ping6_do()
{
local if_name=$1
local dip=$2
local vrf_name
- RET=0
-
vrf_name=$(master_name_get $if_name)
ip vrf exec $vrf_name $PING6 $dip -c 10 -i 0.1 -w 2 &> /dev/null
+}
+
+ping6_test()
+{
+ RET=0
+
+ ping6_do $1 $2
check_err $?
log_test "ping6"
}
diff --git a/tools/testing/selftests/net/forwarding/mirror_gre_bridge_1d.sh b/tools/testing/selftests/net/forwarding/mirror_gre_bridge_1d.sh
new file mode 100755
index 000000000000..c5095da7f6bf
--- /dev/null
+++ b/tools/testing/selftests/net/forwarding/mirror_gre_bridge_1d.sh
@@ -0,0 +1,132 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+# Test for "tc action mirred egress mirror" when the underlay route points at a
+# bridge device without vlan filtering (802.1d).
+#
+# This test uses standard topology for testing mirror-to-gretap. See
+# mirror_gre_topo_lib.sh for more details. The full topology is as follows:
+#
+# +---------------------+ +---------------------+
+# | H1 | | H2 |
+# | + $h1 | | $h2 + |
+# | | 192.0.2.1/28 | | 192.0.2.2/28 | |
+# +-----|---------------+ +---------------|-----+
+# | |
+# +-----|-------------------------------------------------------------|-----+
+# | SW o---> mirror | |
+# | +---|-------------------------------------------------------------|---+ |
+# | | + $swp1 + br1 (802.1q bridge) $swp2 + | |
+# | +---------------------------------------------------------------------+ |
+# | |
+# | +---------------------------------------------------------------------+ |
+# | | + br2 (802.1d bridge) | |
+# | | 192.0.2.129/28 | |
+# | | + $swp3 2001:db8:2::1/64 | |
+# | +---|-----------------------------------------------------------------+ |
+# | | ^ ^ |
+# | | + gt6 (ip6gretap) | + gt4 (gretap) | |
+# | | : loc=2001:db8:2::1 | : loc=192.0.2.129 | |
+# | | : rem=2001:db8:2::2 -+ : rem=192.0.2.130 -+ |
+# | | : ttl=100 : ttl=100 |
+# | | : tos=inherit : tos=inherit |
+# +-----|---------------------:----------------------:----------------------+
+# | : :
+# +-----|---------------------:----------------------:----------------------+
+# | H3 + $h3 + h3-gt6(ip6gretap) + h3-gt4 (gretap) |
+# | 192.0.2.130/28 loc=2001:db8:2::2 loc=192.0.2.130 |
+# | 2001:db8:2::2/64 rem=2001:db8:2::1 rem=192.0.2.129 |
+# | ttl=100 ttl=100 |
+# | tos=inherit tos=inherit |
+# +-------------------------------------------------------------------------+
+
+ALL_TESTS="
+ test_gretap
+ test_ip6gretap
+"
+
+NUM_NETIFS=6
+source lib.sh
+source mirror_lib.sh
+source mirror_gre_lib.sh
+source mirror_gre_topo_lib.sh
+
+setup_prepare()
+{
+ h1=${NETIFS[p1]}
+ swp1=${NETIFS[p2]}
+
+ swp2=${NETIFS[p3]}
+ h2=${NETIFS[p4]}
+
+ swp3=${NETIFS[p5]}
+ h3=${NETIFS[p6]}
+
+ vrf_prepare
+ mirror_gre_topo_create
+
+ ip link add name br2 type bridge vlan_filtering 0
+ ip link set dev br2 up
+
+ ip link set dev $swp3 master br2
+ ip route add 192.0.2.130/32 dev br2
+ ip -6 route add 2001:db8:2::2/128 dev br2
+
+ ip address add dev br2 192.0.2.129/28
+ ip address add dev br2 2001:db8:2::1/64
+
+ ip address add dev $h3 192.0.2.130/28
+ ip address add dev $h3 2001:db8:2::2/64
+}
+
+cleanup()
+{
+ pre_cleanup
+
+ ip address del dev $h3 2001:db8:2::2/64
+ ip address del dev $h3 192.0.2.130/28
+ ip link del dev br2
+
+ mirror_gre_topo_destroy
+ vrf_cleanup
+}
+
+test_gretap()
+{
+ full_test_span_gre_dir gt4 ingress 8 0 "mirror to gretap"
+ full_test_span_gre_dir gt4 egress 0 8 "mirror to gretap"
+}
+
+test_ip6gretap()
+{
+ full_test_span_gre_dir gt6 ingress 8 0 "mirror to ip6gretap"
+ full_test_span_gre_dir gt6 egress 0 8 "mirror to ip6gretap"
+}
+
+test_all()
+{
+ slow_path_trap_install $swp1 ingress
+ slow_path_trap_install $swp1 egress
+
+ tests_run
+
+ slow_path_trap_uninstall $swp1 egress
+ slow_path_trap_uninstall $swp1 ingress
+}
+
+trap cleanup EXIT
+
+setup_prepare
+setup_wait
+
+tcflags="skip_hw"
+test_all
+
+if ! tc_offload_check; then
+ echo "WARN: Could not test offloaded functionality"
+else
+ tcflags="skip_sw"
+ test_all
+fi
+
+exit $EXIT_STATUS
diff --git a/tools/testing/selftests/net/forwarding/mirror_gre_bridge_1d_vlan.sh b/tools/testing/selftests/net/forwarding/mirror_gre_bridge_1d_vlan.sh
index 3bb4c2ba7b14..197e769c2ed1 100755
--- a/tools/testing/selftests/net/forwarding/mirror_gre_bridge_1d_vlan.sh
+++ b/tools/testing/selftests/net/forwarding/mirror_gre_bridge_1d_vlan.sh
@@ -74,12 +74,14 @@ test_vlan_match()
test_gretap()
{
- test_vlan_match gt4 'vlan_id 555 vlan_ethtype ip' "mirror to gretap"
+ test_vlan_match gt4 'skip_hw vlan_id 555 vlan_ethtype ip' \
+ "mirror to gretap"
}
test_ip6gretap()
{
- test_vlan_match gt6 'vlan_id 555 vlan_ethtype ipv6' "mirror to ip6gretap"
+ test_vlan_match gt6 'skip_hw vlan_id 555 vlan_ethtype ipv6' \
+ "mirror to ip6gretap"
}
test_gretap_stp()
diff --git a/tools/testing/selftests/net/forwarding/mirror_gre_bridge_1q.sh b/tools/testing/selftests/net/forwarding/mirror_gre_bridge_1q.sh
new file mode 100755
index 000000000000..a3402cd8d5b6
--- /dev/null
+++ b/tools/testing/selftests/net/forwarding/mirror_gre_bridge_1q.sh
@@ -0,0 +1,126 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+# Test for "tc action mirred egress mirror" when the underlay route points at a
+# bridge device with vlan filtering (802.1q).
+#
+# This test uses standard topology for testing mirror-to-gretap. See
+# mirror_gre_topo_lib.sh for more details. The full topology is as follows:
+#
+# +---------------------+ +---------------------+
+# | H1 | | H2 |
+# | + $h1 | | $h2 + |
+# | | 192.0.2.1/28 | | 192.0.2.2/28 | |
+# +-----|---------------+ +---------------|-----+
+# | |
+# +-----|---------------------------------------------------------------|-----+
+# | SW o---> mirror | |
+# | +---|---------------------------------------------------------------|---+ |
+# | | + $swp1 + br1 (802.1q bridge) $swp2 + | |
+# | | 192.0.2.129/28 | |
+# | | + $swp3 2001:db8:2::1/64 | |
+# | | | vid555 vid555[pvid,untagged] | |
+# | +---|-------------------------------------------------------------------+ |
+# | | ^ ^ |
+# | | + gt6 (ip6gretap) | + gt4 (gretap) | |
+# | | : loc=2001:db8:2::1 | : loc=192.0.2.129 | |
+# | | : rem=2001:db8:2::2 -+ : rem=192.0.2.130 -+ |
+# | | : ttl=100 : ttl=100 |
+# | | : tos=inherit : tos=inherit |
+# +-----|---------------------:------------------------:----------------------+
+# | : :
+# +-----|---------------------:------------------------:----------------------+
+# | H3 + $h3 + h3-gt6(ip6gretap) + h3-gt4 (gretap) |
+# | | loc=2001:db8:2::2 loc=192.0.2.130 |
+# | + $h3.555 rem=2001:db8:2::1 rem=192.0.2.129 |
+# | 192.0.2.130/28 ttl=100 ttl=100 |
+# | 2001:db8:2::2/64 tos=inherit tos=inherit |
+# +---------------------------------------------------------------------------+
+
+ALL_TESTS="
+ test_gretap
+ test_ip6gretap
+"
+
+NUM_NETIFS=6
+source lib.sh
+source mirror_lib.sh
+source mirror_gre_lib.sh
+source mirror_gre_topo_lib.sh
+
+setup_prepare()
+{
+ h1=${NETIFS[p1]}
+ swp1=${NETIFS[p2]}
+
+ swp2=${NETIFS[p3]}
+ h2=${NETIFS[p4]}
+
+ swp3=${NETIFS[p5]}
+ h3=${NETIFS[p6]}
+
+ vrf_prepare
+ mirror_gre_topo_create
+
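+ # Attach $swp3 to br1, the 802.1q bridge from the standard topology. The
+ # bridge itself gets VLAN 555 as PVID/untagged plus the underlay addresses
+ # and routes; $h3 terminates the tunnels on a matching 555 VLAN device.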
+ ip link set dev $swp3 master br1
+ bridge vlan add dev br1 vid 555 pvid untagged self
+ ip address add dev br1 192.0.2.129/28
+ ip address add dev br1 2001:db8:2::1/64
+
+ ip -4 route add 192.0.2.130/32 dev br1
+ ip -6 route add 2001:db8:2::2/128 dev br1
+
+ vlan_create $h3 555 v$h3 192.0.2.130/28 2001:db8:2::2/64
+ bridge vlan add dev $swp3 vid 555
+}
+
+cleanup()
+{
+ pre_cleanup
+
+ ip link set dev $swp3 nomaster
+ vlan_destroy $h3 555
+
+ mirror_gre_topo_destroy
+ vrf_cleanup
+}
+
+test_gretap()
+{
+ full_test_span_gre_dir gt4 ingress 8 0 "mirror to gretap"
+ full_test_span_gre_dir gt4 egress 0 8 "mirror to gretap"
+}
+
+test_ip6gretap()
+{
+ full_test_span_gre_dir gt6 ingress 8 0 "mirror to ip6gretap"
+ full_test_span_gre_dir gt6 egress 0 8 "mirror to ip6gretap"
+}
+
+tests()
+{
+ slow_path_trap_install $swp1 ingress
+ slow_path_trap_install $swp1 egress
+
+ tests_run
+
+ slow_path_trap_uninstall $swp1 egress
+ slow_path_trap_uninstall $swp1 ingress
+}
+
+trap cleanup EXIT
+
+setup_prepare
+setup_wait
+
+tcflags="skip_hw"
+tests
+
+if ! tc_offload_check; then
+ echo "WARN: Could not test offloaded functionality"
+else
+ tcflags="skip_sw"
+ tests
+fi
+
+exit $EXIT_STATUS
diff --git a/tools/testing/selftests/net/forwarding/mirror_gre_bridge_1q_lag.sh b/tools/testing/selftests/net/forwarding/mirror_gre_bridge_1q_lag.sh
new file mode 100755
index 000000000000..61844caf671e
--- /dev/null
+++ b/tools/testing/selftests/net/forwarding/mirror_gre_bridge_1q_lag.sh
@@ -0,0 +1,283 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+# Test for "tc action mirred egress mirror" when the underlay route points at a
+# bridge device with vlan filtering (802.1q), and the egress device is a team
+# device.
+#
+# +----------------------+ +----------------------+
+# | H1 | | H2 |
+# | + $h1.333 | | $h1.555 + |
+# | | 192.0.2.1/28 | | 192.0.2.18/28 | |
+# +-----|----------------+ +----------------|-----+
+# | $h1 |
+# +--------------------------------+------------------------------+
+# |
+# +--------------------------------------|------------------------------------+
+# | SW o---> mirror |
+# | | |
+# | +--------------------------------+------------------------------+ |
+# | | $swp1 | |
+# | + $swp1.333 $swp1.555 + |
+# | 192.0.2.2/28 192.0.2.17/28 |
+# | |
+# | +-----------------------------------------------------------------------+ |
+# | | BR1 (802.1q) | |
+# | | + lag (team) 192.0.2.129/28 | |
+# | | / \ 2001:db8:2::1/64 | |
+# | +---/---\---------------------------------------------------------------+ |
+# | / \ ^ |
+# | | \ + gt4 (gretap) | |
+# | | \ loc=192.0.2.129 | |
+# | | \ rem=192.0.2.130 -+ |
+# | | \ ttl=100 |
+# | | \ tos=inherit |
+# | | \ |
+# | | \_________________________________ |
+# | | \ |
+# | + $swp3 + $swp4 |
+# +---|------------------------------------------------|----------------------+
+# | |
+# +---|----------------------+ +---|----------------------+
+# | + $h3 H3 | | + $h4 H4 |
+# | 192.0.2.130/28 | | 192.0.2.130/28 |
+# | 2001:db8:2::2/64 | | 2001:db8:2::2/64 |
+# +--------------------------+ +--------------------------+
+
+ALL_TESTS="
+ test_mirror_gretap_first
+ test_mirror_gretap_second
+"
+
+NUM_NETIFS=6
+source lib.sh
+source mirror_lib.sh
+source mirror_gre_lib.sh
+
+require_command $ARPING
+
+vlan_host_create()
+{
+ local if_name=$1; shift
+ local vid=$1; shift
+ local vrf_name=$1; shift
+ local ips=("${@}")
+
+ vrf_create $vrf_name
+ ip link set dev $vrf_name up
+ vlan_create $if_name $vid $vrf_name "${ips[@]}"
+}
+
+vlan_host_destroy()
+{
+ local if_name=$1; shift
+ local vid=$1; shift
+ local vrf_name=$1; shift
+
+ vlan_destroy $if_name $vid
+ ip link set dev $vrf_name down
+ vrf_destroy $vrf_name
+}
+
+h1_create()
+{
+ vlan_host_create $h1 333 vrf-h1 192.0.2.1/28
+ ip -4 route add 192.0.2.16/28 vrf vrf-h1 nexthop via 192.0.2.2
+}
+
+h1_destroy()
+{
+ ip -4 route del 192.0.2.16/28 vrf vrf-h1
+ vlan_host_destroy $h1 333 vrf-h1
+}
+
+h2_create()
+{
+ vlan_host_create $h1 555 vrf-h2 192.0.2.18/28
+ ip -4 route add 192.0.2.0/28 vrf vrf-h2 nexthop via 192.0.2.17
+}
+
+h2_destroy()
+{
+ ip -4 route del 192.0.2.0/28 vrf vrf-h2
+ vlan_host_destroy $h1 555 vrf-h2
+}
+
+h3_create()
+{
+ simple_if_init $h3 192.0.2.130/28
+ tc qdisc add dev $h3 clsact
+}
+
+h3_destroy()
+{
+ tc qdisc del dev $h3 clsact
+ simple_if_fini $h3 192.0.2.130/28
+}
+
+h4_create()
+{
+ simple_if_init $h4 192.0.2.130/28
+ tc qdisc add dev $h4 clsact
+}
+
+h4_destroy()
+{
+ tc qdisc del dev $h4 clsact
+ simple_if_fini $h4 192.0.2.130/28
+}
+
+switch_create()
+{
+ ip link set dev $swp1 up
+ tc qdisc add dev $swp1 clsact
+ vlan_create $swp1 333 "" 192.0.2.2/28
+ vlan_create $swp1 555 "" 192.0.2.17/28
+
+ tunnel_create gt4 gretap 192.0.2.129 192.0.2.130 \
+ ttl 100 tos inherit
+
+ ip link set dev $swp3 up
+ ip link set dev $swp4 up
+
+ ip link add name br1 type bridge vlan_filtering 1
+ ip link set dev br1 up
+ __addr_add_del br1 add 192.0.2.129/32
+ ip -4 route add 192.0.2.130/32 dev br1
+
+ team_create lag loadbalance $swp3 $swp4
+ ip link set dev lag master br1
+}
+
+switch_destroy()
+{
+ ip link set dev lag nomaster
+ team_destroy lag
+
+ ip -4 route del 192.0.2.130/32 dev br1
+ __addr_add_del br1 del 192.0.2.129/32
+ ip link set dev br1 down
+ ip link del dev br1
+
+ ip link set dev $swp4 down
+ ip link set dev $swp3 down
+
+ tunnel_destroy gt4
+
+ vlan_destroy $swp1 555
+ vlan_destroy $swp1 333
+ tc qdisc del dev $swp1 clsact
+ ip link set dev $swp1 down
+}
+
+setup_prepare()
+{
+ h1=${NETIFS[p1]}
+ swp1=${NETIFS[p2]}
+
+ swp3=${NETIFS[p3]}
+ h3=${NETIFS[p4]}
+
+ swp4=${NETIFS[p5]}
+ h4=${NETIFS[p6]}
+
+ vrf_prepare
+
+ ip link set dev $h1 up
+ h1_create
+ h2_create
+ h3_create
+ h4_create
+ switch_create
+
+ trap_install $h3 ingress
+ trap_install $h4 ingress
+}
+
+cleanup()
+{
+ pre_cleanup
+
+ trap_uninstall $h4 ingress
+ trap_uninstall $h3 ingress
+
+ switch_destroy
+ h4_destroy
+ h3_destroy
+ h2_destroy
+ h1_destroy
+ ip link set dev $h1 down
+
+ vrf_cleanup
+}
+
+test_lag_slave()
+{
+ local host_dev=$1; shift
+ local up_dev=$1; shift
+ local down_dev=$1; shift
+ local what=$1; shift
+
+ RET=0
+
+ mirror_install $swp1 ingress gt4 \
+ "proto 802.1q flower vlan_id 333 $tcflags"
+
+ # Test connectivity through $up_dev when $down_dev is set down.
+ ip link set dev $down_dev down
+ setup_wait_dev $up_dev
+ setup_wait_dev $host_dev
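+ # Re-prime the bridge FDB / neighbor entry for the tunnel remote so that
+ # mirrored packets can be forwarded toward H3/H4 while $down_dev is down.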
+ $ARPING -I br1 192.0.2.130 -qfc 1
+ sleep 2
+ mirror_test vrf-h1 192.0.2.1 192.0.2.18 $host_dev 1 10
+
+ # Test lack of connectivity when both slaves are down.
+ ip link set dev $up_dev down
+ sleep 2
+ mirror_test vrf-h1 192.0.2.1 192.0.2.18 $h3 1 0
+ mirror_test vrf-h1 192.0.2.1 192.0.2.18 $h4 1 0
+
+ ip link set dev $up_dev up
+ ip link set dev $down_dev up
+ mirror_uninstall $swp1 ingress
+
+ log_test "$what ($tcflags)"
+}
+
+test_mirror_gretap_first()
+{
+ test_lag_slave $h3 $swp3 $swp4 "mirror to gretap: LAG first slave"
+}
+
+test_mirror_gretap_second()
+{
+ test_lag_slave $h4 $swp4 $swp3 "mirror to gretap: LAG second slave"
+}
+
+test_all()
+{
+ slow_path_trap_install $swp1 ingress
+ slow_path_trap_install $swp1 egress
+
+ tests_run
+
+ slow_path_trap_uninstall $swp1 egress
+ slow_path_trap_uninstall $swp1 ingress
+}
+
+trap cleanup EXIT
+
+setup_prepare
+setup_wait
+
+tcflags="skip_hw"
+test_all
+
+if ! tc_offload_check; then
+ echo "WARN: Could not test offloaded functionality"
+else
+ tcflags="skip_sw"
+ test_all
+fi
+
+exit $EXIT_STATUS
diff --git a/tools/testing/selftests/net/forwarding/mirror_gre_changes.sh b/tools/testing/selftests/net/forwarding/mirror_gre_changes.sh
index aa29d46186a8..135902aa8b11 100755
--- a/tools/testing/selftests/net/forwarding/mirror_gre_changes.sh
+++ b/tools/testing/selftests/net/forwarding/mirror_gre_changes.sh
@@ -122,15 +122,8 @@ test_span_gre_egress_up()
# After setting the device up, wait for neighbor to get resolved so that
# we can expect mirroring to work.
ip link set dev $swp3 up
- while true; do
- ip neigh sh dev $swp3 $remote_ip nud reachable |
- grep -q ^
- if [[ $? -ne 0 ]]; then
- sleep 1
- else
- break
- fi
- done
+ setup_wait_dev $swp3
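+ # One ping is enough to resolve the nexthop neighbor again after the
+ # device comes back up; mirroring is only expected to work once it is.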
+ ping -c 1 -I $swp3 $remote_ip &>/dev/null
quick_test_span_gre_dir $tundev ingress
mirror_uninstall $swp1 ingress
diff --git a/tools/testing/selftests/net/forwarding/mirror_gre_lag_lacp.sh b/tools/testing/selftests/net/forwarding/mirror_gre_lag_lacp.sh
new file mode 100755
index 000000000000..9edf4cb104a8
--- /dev/null
+++ b/tools/testing/selftests/net/forwarding/mirror_gre_lag_lacp.sh
@@ -0,0 +1,285 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+# Test for "tc action mirred egress mirror" when the underlay route points at a
+# team device.
+#
+# +----------------------+ +----------------------+
+# | H1 | | H2 |
+# | + $h1.333 | | $h1.555 + |
+# | | 192.0.2.1/28 | | 192.0.2.18/28 | |
+# +----|-----------------+ +----------------|-----+
+# | $h1 |
+# +---------------------------------+------------------------------+
+# |
+# +--------------------------------------|------------------------------------+
+# | SW o---> mirror |
+# | | |
+# | +----------------------------------+------------------------------+ |
+# | | $swp1 | |
+# | + $swp1.333 $swp1.555 + |
+# | 192.0.2.2/28 192.0.2.17/28 |
+# | |
+# | |
+# | + gt4 (gretap) ,-> + lag1 (team) |
+# | loc=192.0.2.129 | | 192.0.2.129/28 |
+# | rem=192.0.2.130 --' | |
+# | ttl=100 | |
+# | tos=inherit | |
+# | _____________________|______________________ |
+# | / \ |
+# | / \ |
+# | + $swp3 + $swp4 |
+# +---|------------------------------------------------|----------------------+
+# | |
+# +---|------------------------------------------------|----------------------+
+# | + $h3 + $h4 H3 |
+# | \ / |
+# | \____________________________________________/ |
+# | | |
+# | + lag2 (team) |
+# | 192.0.2.130/28 |
+# | |
+# +---------------------------------------------------------------------------+
+
+ALL_TESTS="
+ test_mirror_gretap_first
+ test_mirror_gretap_second
+"
+
+NUM_NETIFS=6
+source lib.sh
+source mirror_lib.sh
+source mirror_gre_lib.sh
+
+require_command $ARPING
+
+vlan_host_create()
+{
+ local if_name=$1; shift
+ local vid=$1; shift
+ local vrf_name=$1; shift
+ local ips=("${@}")
+
+ vrf_create $vrf_name
+ ip link set dev $vrf_name up
+ vlan_create $if_name $vid $vrf_name "${ips[@]}"
+}
+
+vlan_host_destroy()
+{
+ local if_name=$1; shift
+ local vid=$1; shift
+ local vrf_name=$1; shift
+
+ vlan_destroy $if_name $vid
+ ip link set dev $vrf_name down
+ vrf_destroy $vrf_name
+}
+
+h1_create()
+{
+ vlan_host_create $h1 333 vrf-h1 192.0.2.1/28
+ ip -4 route add 192.0.2.16/28 vrf vrf-h1 nexthop via 192.0.2.2
+}
+
+h1_destroy()
+{
+ ip -4 route del 192.0.2.16/28 vrf vrf-h1
+ vlan_host_destroy $h1 333 vrf-h1
+}
+
+h2_create()
+{
+ vlan_host_create $h1 555 vrf-h2 192.0.2.18/28
+ ip -4 route add 192.0.2.0/28 vrf vrf-h2 nexthop via 192.0.2.17
+}
+
+h2_destroy()
+{
+ ip -4 route del 192.0.2.0/28 vrf vrf-h2
+ vlan_host_destroy $h1 555 vrf-h2
+}
+
+h3_create_team()
+{
+ team_create lag2 lacp $h3 $h4
+ __simple_if_init lag2 vrf-h3 192.0.2.130/32
+ ip -4 route add vrf vrf-h3 192.0.2.129/32 dev lag2
+}
+
+h3_destroy_team()
+{
+ ip -4 route del vrf vrf-h3 192.0.2.129/32 dev lag2
+ __simple_if_fini lag2 192.0.2.130/32
+ team_destroy lag2
+
+ ip link set dev $h3 down
+ ip link set dev $h4 down
+}
+
+h3_create()
+{
+ vrf_create vrf-h3
+ ip link set dev vrf-h3 up
+ tc qdisc add dev $h3 clsact
+ tc qdisc add dev $h4 clsact
+ h3_create_team
+}
+
+h3_destroy()
+{
+ h3_destroy_team
+ tc qdisc del dev $h4 clsact
+ tc qdisc del dev $h3 clsact
+ ip link set dev vrf-h3 down
+ vrf_destroy vrf-h3
+}
+
+switch_create()
+{
+ ip link set dev $swp1 up
+ tc qdisc add dev $swp1 clsact
+ vlan_create $swp1 333 "" 192.0.2.2/28
+ vlan_create $swp1 555 "" 192.0.2.17/28
+
+ tunnel_create gt4 gretap 192.0.2.129 192.0.2.130 \
+ ttl 100 tos inherit
+
+ ip link set dev $swp3 up
+ ip link set dev $swp4 up
+ team_create lag1 lacp $swp3 $swp4
+ __addr_add_del lag1 add 192.0.2.129/32
+ ip -4 route add 192.0.2.130/32 dev lag1
+}
+
+switch_destroy()
+{
+ ip -4 route del 192.0.2.130/32 dev lag1
+ __addr_add_del lag1 del 192.0.2.129/32
+ team_destroy lag1
+
+ ip link set dev $swp4 down
+ ip link set dev $swp3 down
+
+ tunnel_destroy gt4
+
+ vlan_destroy $swp1 555
+ vlan_destroy $swp1 333
+ tc qdisc del dev $swp1 clsact
+ ip link set dev $swp1 down
+}
+
+setup_prepare()
+{
+ h1=${NETIFS[p1]}
+ swp1=${NETIFS[p2]}
+
+ swp3=${NETIFS[p3]}
+ h3=${NETIFS[p4]}
+
+ swp4=${NETIFS[p5]}
+ h4=${NETIFS[p6]}
+
+ vrf_prepare
+
+ ip link set dev $h1 up
+ h1_create
+ h2_create
+ h3_create
+ switch_create
+
+ trap_install $h3 ingress
+ trap_install $h4 ingress
+}
+
+cleanup()
+{
+ pre_cleanup
+
+ trap_uninstall $h4 ingress
+ trap_uninstall $h3 ingress
+
+ switch_destroy
+ h3_destroy
+ h2_destroy
+ h1_destroy
+ ip link set dev $h1 down
+
+ vrf_cleanup
+}
+
+test_lag_slave()
+{
+ local up_dev=$1; shift
+ local down_dev=$1; shift
+ local what=$1; shift
+
+ RET=0
+
+ mirror_install $swp1 ingress gt4 \
+ "proto 802.1q flower vlan_id 333 $tcflags"
+
+ # Move $down_dev away from the team. That will prompt a change in the
+ # txability of the connected device, without changing its up/down state.
+ # The driver should notice the txability change and move the traffic to
+ # the other slave.
+ ip link set dev $down_dev nomaster
+ sleep 2
+ mirror_test vrf-h1 192.0.2.1 192.0.2.18 $up_dev 1 10
+
+ # Test lack of connectivity when neither slave is txable.
+ ip link set dev $up_dev nomaster
+ sleep 2
+ mirror_test vrf-h1 192.0.2.1 192.0.2.18 $h3 1 0
+ mirror_test vrf-h1 192.0.2.1 192.0.2.18 $h4 1 0
+ mirror_uninstall $swp1 ingress
+
+ # Recreate H3's team device, because mlxsw, which this test is
+ # predominantly meant to test, requires a bottom-up construction and
+ # doesn't allow enslavement to a device that already has an upper.
+ h3_destroy_team
+ h3_create_team
+ # Wait for ${h,swp}{3,4}.
+ setup_wait
+
+ log_test "$what ($tcflags)"
+}
+
+test_mirror_gretap_first()
+{
+ test_lag_slave $h3 $h4 "mirror to gretap: LAG first slave"
+}
+
+test_mirror_gretap_second()
+{
+ test_lag_slave $h4 $h3 "mirror to gretap: LAG second slave"
+}
+
+test_all()
+{
+ slow_path_trap_install $swp1 ingress
+ slow_path_trap_install $swp1 egress
+
+ tests_run
+
+ slow_path_trap_uninstall $swp1 egress
+ slow_path_trap_uninstall $swp1 ingress
+}
+
+trap cleanup EXIT
+
+setup_prepare
+setup_wait
+
+tcflags="skip_hw"
+test_all
+
+if ! tc_offload_check; then
+ echo "WARN: Could not test offloaded functionality"
+else
+ tcflags="skip_sw"
+ test_all
+fi
+
+exit $EXIT_STATUS
diff --git a/tools/testing/selftests/net/forwarding/mirror_gre_lib.sh b/tools/testing/selftests/net/forwarding/mirror_gre_lib.sh
index 619b469365be..fac486178ef7 100644
--- a/tools/testing/selftests/net/forwarding/mirror_gre_lib.sh
+++ b/tools/testing/selftests/net/forwarding/mirror_gre_lib.sh
@@ -1,6 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
-source mirror_lib.sh
+source "$relative_path/mirror_lib.sh"
quick_test_span_gre_dir_ips()
{
@@ -62,7 +62,7 @@ full_test_span_gre_dir_vlan_ips()
"$backward_type" "$ip1" "$ip2"
tc filter add dev $h3 ingress pref 77 prot 802.1q \
- flower $vlan_match ip_proto 0x2f \
+ flower $vlan_match \
action pass
mirror_test v$h1 $ip1 $ip2 $h3 77 10
tc filter del dev $h3 ingress pref 77
diff --git a/tools/testing/selftests/net/forwarding/mirror_gre_nh.sh b/tools/testing/selftests/net/forwarding/mirror_gre_nh.sh
index 8fa681eb90e7..6f9ef1820e93 100755
--- a/tools/testing/selftests/net/forwarding/mirror_gre_nh.sh
+++ b/tools/testing/selftests/net/forwarding/mirror_gre_nh.sh
@@ -35,6 +35,8 @@ setup_prepare()
vrf_prepare
mirror_gre_topo_create
+ sysctl_set net.ipv4.conf.v$h3.rp_filter 0
+
ip address add dev $swp3 192.0.2.161/28
ip address add dev $h3 192.0.2.162/28
ip address add dev gt4 192.0.2.129/32
@@ -61,6 +63,8 @@ cleanup()
ip address del dev $h3 192.0.2.162/28
ip address del dev $swp3 192.0.2.161/28
+ sysctl_restore net.ipv4.conf.v$h3.rp_filter
+
mirror_gre_topo_destroy
vrf_cleanup
diff --git a/tools/testing/selftests/net/forwarding/mirror_gre_topo_lib.sh b/tools/testing/selftests/net/forwarding/mirror_gre_topo_lib.sh
index 253419564708..39c03e2867f4 100644
--- a/tools/testing/selftests/net/forwarding/mirror_gre_topo_lib.sh
+++ b/tools/testing/selftests/net/forwarding/mirror_gre_topo_lib.sh
@@ -33,7 +33,7 @@
# | |
# +-------------------------------------------------------------------------+
-source mirror_topo_lib.sh
+source "$relative_path/mirror_topo_lib.sh"
mirror_gre_topo_h3_create()
{
diff --git a/tools/testing/selftests/net/forwarding/mirror_gre_vlan_bridge_1q.sh b/tools/testing/selftests/net/forwarding/mirror_gre_vlan_bridge_1q.sh
index 5dbc7a08f4bd..204b25f13934 100755
--- a/tools/testing/selftests/net/forwarding/mirror_gre_vlan_bridge_1q.sh
+++ b/tools/testing/selftests/net/forwarding/mirror_gre_vlan_bridge_1q.sh
@@ -28,6 +28,8 @@ source mirror_lib.sh
source mirror_gre_lib.sh
source mirror_gre_topo_lib.sh
+require_command $ARPING
+
setup_prepare()
{
h1=${NETIFS[p1]}
@@ -39,6 +41,12 @@ setup_prepare()
swp3=${NETIFS[p5]}
h3=${NETIFS[p6]}
+ # gt4's remote address is at $h3.555, not $h3. Thus the packets arriving
+ # directly at $h3 for test_gretap_untagged_egress() are rejected by
+ # rp_filter and the test spuriously fails.
+ sysctl_set net.ipv4.conf.all.rp_filter 0
+ sysctl_set net.ipv4.conf.$h3.rp_filter 0
+
vrf_prepare
mirror_gre_topo_create
@@ -65,6 +73,9 @@ cleanup()
mirror_gre_topo_destroy
vrf_cleanup
+
+ sysctl_restore net.ipv4.conf.$h3.rp_filter
+ sysctl_restore net.ipv4.conf.all.rp_filter
}
test_vlan_match()
@@ -79,12 +90,14 @@ test_vlan_match()
test_gretap()
{
- test_vlan_match gt4 'vlan_id 555 vlan_ethtype ip' "mirror to gretap"
+ test_vlan_match gt4 'skip_hw vlan_id 555 vlan_ethtype ip' \
+ "mirror to gretap"
}
test_ip6gretap()
{
- test_vlan_match gt6 'vlan_id 555 vlan_ethtype ipv6' "mirror to ip6gretap"
+ test_vlan_match gt6 'skip_hw vlan_id 555 vlan_ethtype ipv6' \
+ "mirror to ip6gretap"
}
test_span_gre_forbidden_cpu()
@@ -138,7 +151,7 @@ test_span_gre_forbidden_egress()
bridge vlan add dev $swp3 vid 555
# Re-prime FDB
- arping -I br1.555 192.0.2.130 -fqc 1
+ $ARPING -I br1.555 192.0.2.130 -fqc 1
sleep 1
quick_test_span_gre_dir $tundev ingress
@@ -212,7 +225,7 @@ test_span_gre_fdb_roaming()
bridge fdb del dev $swp2 $h3mac vlan 555 master
# Re-prime FDB
- arping -I br1.555 192.0.2.130 -fqc 1
+ $ARPING -I br1.555 192.0.2.130 -fqc 1
sleep 1
quick_test_span_gre_dir $tundev ingress
diff --git a/tools/testing/selftests/net/forwarding/mirror_lib.sh b/tools/testing/selftests/net/forwarding/mirror_lib.sh
index d36dc26c6c51..07991e1025c7 100644
--- a/tools/testing/selftests/net/forwarding/mirror_lib.sh
+++ b/tools/testing/selftests/net/forwarding/mirror_lib.sh
@@ -105,7 +105,7 @@ do_test_span_vlan_dir_ips()
# Install the capture as skip_hw to avoid double-counting of packets.
# The traffic is meant for local box anyway, so will be trapped to
# kernel.
- vlan_capture_install $dev "skip_hw vlan_id $vid"
+ vlan_capture_install $dev "skip_hw vlan_id $vid vlan_ethtype ip"
mirror_test v$h1 $ip1 $ip2 $dev 100 $expect
mirror_test v$h2 $ip2 $ip1 $dev 100 $expect
vlan_capture_uninstall $dev
diff --git a/tools/testing/selftests/net/forwarding/router_bridge.sh b/tools/testing/selftests/net/forwarding/router_bridge.sh
new file mode 100755
index 000000000000..ebc596a272f7
--- /dev/null
+++ b/tools/testing/selftests/net/forwarding/router_bridge.sh
@@ -0,0 +1,113 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+ALL_TESTS="
+ ping_ipv4
+ ping_ipv6
+"
+NUM_NETIFS=4
+source lib.sh
+
+h1_create()
+{
+ simple_if_init $h1 192.0.2.1/28 2001:db8:1::1/64
+ ip -4 route add 192.0.2.128/28 vrf v$h1 nexthop via 192.0.2.2
+ ip -6 route add 2001:db8:2::/64 vrf v$h1 nexthop via 2001:db8:1::2
+}
+
+h1_destroy()
+{
+ ip -6 route del 2001:db8:2::/64 vrf v$h1
+ ip -4 route del 192.0.2.128/28 vrf v$h1
+ simple_if_fini $h1 192.0.2.1/28 2001:db8:1::1/64
+}
+
+h2_create()
+{
+ simple_if_init $h2 192.0.2.130/28 2001:db8:2::2/64
+ ip -4 route add 192.0.2.0/28 vrf v$h2 nexthop via 192.0.2.129
+ ip -6 route add 2001:db8:1::/64 vrf v$h2 nexthop via 2001:db8:2::1
+}
+
+h2_destroy()
+{
+ ip -6 route del 2001:db8:1::/64 vrf v$h2
+ ip -4 route del 192.0.2.0/28 vrf v$h2
+ simple_if_fini $h2 192.0.2.130/28 2001:db8:2::2/64
+}
+
+router_create()
+{
+ ip link add name br1 type bridge vlan_filtering 1
+ ip link set dev br1 up
+
+ ip link set dev $swp1 master br1
+ ip link set dev $swp1 up
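+ # The routed address lives on br1 itself; $swp1 is only a bridge port, so
+ # traffic from H1 is bridged into br1 and routed from there.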
+ __addr_add_del br1 add 192.0.2.2/28 2001:db8:1::2/64
+
+ ip link set dev $swp2 up
+ __addr_add_del $swp2 add 192.0.2.129/28 2001:db8:2::1/64
+}
+
+router_destroy()
+{
+ __addr_add_del $swp2 del 192.0.2.129/28 2001:db8:2::1/64
+ ip link set dev $swp2 down
+
+ __addr_add_del br1 del 192.0.2.2/28 2001:db8:1::2/64
+ ip link set dev $swp1 down
+ ip link set dev $swp1 nomaster
+
+ ip link del dev br1
+}
+
+setup_prepare()
+{
+ h1=${NETIFS[p1]}
+ swp1=${NETIFS[p2]}
+
+ swp2=${NETIFS[p3]}
+ h2=${NETIFS[p4]}
+
+ vrf_prepare
+
+ h1_create
+ h2_create
+
+ router_create
+
+ forwarding_enable
+}
+
+cleanup()
+{
+ pre_cleanup
+
+ forwarding_restore
+
+ router_destroy
+
+ h2_destroy
+ h1_destroy
+
+ vrf_cleanup
+}
+
+ping_ipv4()
+{
+ ping_test $h1 192.0.2.130
+}
+
+ping_ipv6()
+{
+ ping6_test $h1 2001:db8:2::2
+}
+
+trap cleanup EXIT
+
+setup_prepare
+setup_wait
+
+tests_run
+
+exit $EXIT_STATUS
diff --git a/tools/testing/selftests/net/forwarding/router_bridge_vlan.sh b/tools/testing/selftests/net/forwarding/router_bridge_vlan.sh
new file mode 100755
index 000000000000..fef88eb4b873
--- /dev/null
+++ b/tools/testing/selftests/net/forwarding/router_bridge_vlan.sh
@@ -0,0 +1,132 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+ALL_TESTS="
+ ping_ipv4
+ ping_ipv6
+ vlan
+"
+NUM_NETIFS=4
+source lib.sh
+
+h1_create()
+{
+ simple_if_init $h1
+ vlan_create $h1 555 v$h1 192.0.2.1/28 2001:db8:1::1/64
+ ip -4 route add 192.0.2.128/28 vrf v$h1 nexthop via 192.0.2.2
+ ip -6 route add 2001:db8:2::/64 vrf v$h1 nexthop via 2001:db8:1::2
+}
+
+h1_destroy()
+{
+ ip -6 route del 2001:db8:2::/64 vrf v$h1
+ ip -4 route del 192.0.2.128/28 vrf v$h1
+ vlan_destroy $h1 555
+ simple_if_fini $h1
+}
+
+h2_create()
+{
+ simple_if_init $h2 192.0.2.130/28 2001:db8:2::2/64
+ ip -4 route add 192.0.2.0/28 vrf v$h2 nexthop via 192.0.2.129
+ ip -6 route add 2001:db8:1::/64 vrf v$h2 nexthop via 2001:db8:2::1
+}
+
+h2_destroy()
+{
+ ip -6 route del 2001:db8:1::/64 vrf v$h2
+ ip -4 route del 192.0.2.0/28 vrf v$h2
+ simple_if_fini $h2 192.0.2.130/28 2001:db8:2::2/64
+}
+
+router_create()
+{
+ ip link add name br1 type bridge vlan_filtering 1
+ ip link set dev br1 up
+
+ ip link set dev $swp1 master br1
+ ip link set dev $swp1 up
+
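+ # Make VLAN 555 the PVID, egress-untagged, on the bridge device itself
+ # ("self"), so H1's tagged traffic reaches br1's routed interface.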
+ bridge vlan add dev br1 vid 555 self pvid untagged
+ bridge vlan add dev $swp1 vid 555
+
+ __addr_add_del br1 add 192.0.2.2/28 2001:db8:1::2/64
+
+ ip link set dev $swp2 up
+ __addr_add_del $swp2 add 192.0.2.129/28 2001:db8:2::1/64
+}
+
+router_destroy()
+{
+ __addr_add_del $swp2 del 192.0.2.129/28 2001:db8:2::1/64
+ ip link set dev $swp2 down
+
+ __addr_add_del br1 del 192.0.2.2/28 2001:db8:1::2/64
+ ip link set dev $swp1 down
+ ip link set dev $swp1 nomaster
+
+ ip link del dev br1
+}
+
+setup_prepare()
+{
+ h1=${NETIFS[p1]}
+ swp1=${NETIFS[p2]}
+
+ swp2=${NETIFS[p3]}
+ h2=${NETIFS[p4]}
+
+ vrf_prepare
+
+ h1_create
+ h2_create
+
+ router_create
+
+ forwarding_enable
+}
+
+cleanup()
+{
+ pre_cleanup
+
+ forwarding_restore
+
+ router_destroy
+
+ h2_destroy
+ h1_destroy
+
+ vrf_cleanup
+}
+
+vlan()
+{
+ RET=0
+
+ bridge vlan add dev br1 vid 333 self
+ check_err $? "Can't add a non-PVID VLAN"
+ bridge vlan del dev br1 vid 333 self
+ check_err $? "Can't remove a non-PVID VLAN"
+
+ log_test "vlan"
+}
+
+ping_ipv4()
+{
+ ping_test $h1 192.0.2.130
+}
+
+ping_ipv6()
+{
+ ping6_test $h1 2001:db8:2::2
+}
+
+trap cleanup EXIT
+
+setup_prepare
+setup_wait
+
+tests_run
+
+exit $EXIT_STATUS
diff --git a/tools/testing/selftests/net/forwarding/router_broadcast.sh b/tools/testing/selftests/net/forwarding/router_broadcast.sh
new file mode 100755
index 000000000000..7bd2ebb6e9de
--- /dev/null
+++ b/tools/testing/selftests/net/forwarding/router_broadcast.sh
@@ -0,0 +1,233 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+ALL_TESTS="ping_ipv4"
+NUM_NETIFS=6
+source lib.sh
+
+h1_create()
+{
+ vrf_create "vrf-h1"
+ ip link set dev $h1 master vrf-h1
+
+ ip link set dev vrf-h1 up
+ ip link set dev $h1 up
+
+ ip address add 192.0.2.2/24 dev $h1
+
+ ip route add 198.51.100.0/24 vrf vrf-h1 nexthop via 192.0.2.1
+ ip route add 198.51.200.0/24 vrf vrf-h1 nexthop via 192.0.2.1
+}
+
+h1_destroy()
+{
+ ip route del 198.51.200.0/24 vrf vrf-h1
+ ip route del 198.51.100.0/24 vrf vrf-h1
+
+ ip address del 192.0.2.2/24 dev $h1
+
+ ip link set dev $h1 down
+ vrf_destroy "vrf-h1"
+}
+
+h2_create()
+{
+ vrf_create "vrf-h2"
+ ip link set dev $h2 master vrf-h2
+
+ ip link set dev vrf-h2 up
+ ip link set dev $h2 up
+
+ ip address add 198.51.100.2/24 dev $h2
+
+ ip route add 192.0.2.0/24 vrf vrf-h2 nexthop via 198.51.100.1
+ ip route add 198.51.200.0/24 vrf vrf-h2 nexthop via 198.51.100.1
+}
+
+h2_destroy()
+{
+ ip route del 198.51.200.0/24 vrf vrf-h2
+ ip route del 192.0.2.0/24 vrf vrf-h2
+
+ ip address del 198.51.100.2/24 dev $h2
+
+ ip link set dev $h2 down
+ vrf_destroy "vrf-h2"
+}
+
+h3_create()
+{
+ vrf_create "vrf-h3"
+ ip link set dev $h3 master vrf-h3
+
+ ip link set dev vrf-h3 up
+ ip link set dev $h3 up
+
+ ip address add 198.51.200.2/24 dev $h3
+
+ ip route add 192.0.2.0/24 vrf vrf-h3 nexthop via 198.51.200.1
+ ip route add 198.51.100.0/24 vrf vrf-h3 nexthop via 198.51.200.1
+}
+
+h3_destroy()
+{
+ ip route del 198.51.100.0/24 vrf vrf-h3
+ ip route del 192.0.2.0/24 vrf vrf-h3
+
+ ip address del 198.51.200.2/24 dev $h3
+
+ ip link set dev $h3 down
+ vrf_destroy "vrf-h3"
+}
+
+router_create()
+{
+ ip link set dev $rp1 up
+ ip link set dev $rp2 up
+ ip link set dev $rp3 up
+
+ ip address add 192.0.2.1/24 dev $rp1
+
+ ip address add 198.51.100.1/24 dev $rp2
+ ip address add 198.51.200.1/24 dev $rp3
+}
+
+router_destroy()
+{
+ ip address del 198.51.200.1/24 dev $rp3
+ ip address del 198.51.100.1/24 dev $rp2
+
+ ip address del 192.0.2.1/24 dev $rp1
+
+ ip link set dev $rp3 down
+ ip link set dev $rp2 down
+ ip link set dev $rp1 down
+}
+
+setup_prepare()
+{
+ h1=${NETIFS[p1]}
+ rp1=${NETIFS[p2]}
+
+ rp2=${NETIFS[p3]}
+ h2=${NETIFS[p4]}
+
+ rp3=${NETIFS[p5]}
+ h3=${NETIFS[p6]}
+
+ vrf_prepare
+
+ h1_create
+ h2_create
+ h3_create
+
+ router_create
+
+ forwarding_enable
+}
+
+cleanup()
+{
+ pre_cleanup
+
+ forwarding_restore
+
+ router_destroy
+
+ h3_destroy
+ h2_destroy
+ h1_destroy
+
+ vrf_cleanup
+}
+
+bc_forwarding_disable()
+{
+ sysctl_set net.ipv4.conf.all.bc_forwarding 0
+ sysctl_set net.ipv4.conf.$rp1.bc_forwarding 0
+}
+
+bc_forwarding_enable()
+{
+ sysctl_set net.ipv4.conf.all.bc_forwarding 1
+ sysctl_set net.ipv4.conf.$rp1.bc_forwarding 1
+}
+
+bc_forwarding_restore()
+{
+ sysctl_restore net.ipv4.conf.$rp1.bc_forwarding
+ sysctl_restore net.ipv4.conf.all.bc_forwarding
+}
+
+ping_test_from()
+{
+ local oif=$1
+ local dip=$2
+ local from=$3
+ local fail=${4:-0}
+
+ RET=0
+
+ log_info "ping $dip, expected reply from $from"
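+ # Broadcast-ping $dip and grep the output for a reply from $from;
+ # check_err_fail inverts the expectation when $fail is 1.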
+ ip vrf exec $(master_name_get $oif) \
+ $PING -I $oif $dip -c 10 -i 0.1 -w 2 -b 2>&1 | grep $from &> /dev/null
+ check_err_fail $fail $?
+}
+
+ping_ipv4()
+{
+ sysctl_set net.ipv4.icmp_echo_ignore_broadcasts 0
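+ # Hosts only answer broadcast pings when icmp_echo_ignore_broadcasts is
+ # off, which the reply checks below rely on.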
+
+ bc_forwarding_disable
+ log_info "bc_forwarding disabled on r1 =>"
+ ping_test_from $h1 198.51.100.255 192.0.2.1
+ log_test "h1 -> net2: reply from r1 (not forwarding)"
+ ping_test_from $h1 198.51.200.255 192.0.2.1
+ log_test "h1 -> net3: reply from r1 (not forwarding)"
+ ping_test_from $h1 192.0.2.255 192.0.2.1
+ log_test "h1 -> net1: reply from r1 (not dropping)"
+ ping_test_from $h1 255.255.255.255 192.0.2.1
+ log_test "h1 -> 255.255.255.255: reply from r1 (not forwarding)"
+
+ ping_test_from $h2 192.0.2.255 198.51.100.1
+ log_test "h2 -> net1: reply from r1 (not forwarding)"
+ ping_test_from $h2 198.51.200.255 198.51.100.1
+ log_test "h2 -> net3: reply from r1 (not forwarding)"
+ ping_test_from $h2 198.51.100.255 198.51.100.1
+ log_test "h2 -> net2: reply from r1 (not dropping)"
+ ping_test_from $h2 255.255.255.255 198.51.100.1
+ log_test "h2 -> 255.255.255.255: reply from r1 (not forwarding)"
+ bc_forwarding_restore
+
+ bc_forwarding_enable
+ log_info "bc_forwarding enabled on r1 =>"
+ ping_test_from $h1 198.51.100.255 198.51.100.2
+ log_test "h1 -> net2: reply from h2 (forwarding)"
+ ping_test_from $h1 198.51.200.255 198.51.200.2
+ log_test "h1 -> net3: reply from h3 (forwarding)"
+ ping_test_from $h1 192.0.2.255 192.0.2.1 1
+ log_test "h1 -> net1: no reply (dropping)"
+ ping_test_from $h1 255.255.255.255 192.0.2.1
+ log_test "h1 -> 255.255.255.255: reply from r1 (not forwarding)"
+
+ ping_test_from $h2 192.0.2.255 192.0.2.2
+ log_test "h2 -> net1: reply from h1 (forwarding)"
+ ping_test_from $h2 198.51.200.255 198.51.200.2
+ log_test "h2 -> net3: reply from h3 (forwarding)"
+ ping_test_from $h2 198.51.100.255 198.51.100.1 1
+ log_test "h2 -> net2: no reply (dropping)"
+ ping_test_from $h2 255.255.255.255 198.51.100.1
+ log_test "h2 -> 255.255.255.255: reply from r1 (not forwarding)"
+ bc_forwarding_restore
+
+ sysctl_restore net.ipv4.icmp_echo_ignore_broadcasts
+}
+
+trap cleanup EXIT
+
+setup_prepare
+setup_wait
+
+tests_run
+
+exit $EXIT_STATUS
diff --git a/tools/testing/selftests/net/forwarding/router_multipath.sh b/tools/testing/selftests/net/forwarding/router_multipath.sh
index 8b6d0fb6d604..79a209927962 100755
--- a/tools/testing/selftests/net/forwarding/router_multipath.sh
+++ b/tools/testing/selftests/net/forwarding/router_multipath.sh
@@ -159,45 +159,6 @@ router2_destroy()
vrf_destroy "vrf-r2"
}
-multipath_eval()
-{
- local desc="$1"
- local weight_rp12=$2
- local weight_rp13=$3
- local packets_rp12=$4
- local packets_rp13=$5
- local weights_ratio packets_ratio diff
-
- RET=0
-
- if [[ "$packets_rp12" -eq "0" || "$packets_rp13" -eq "0" ]]; then
- check_err 1 "Packet difference is 0"
- log_test "Multipath"
- log_info "Expected ratio $weights_ratio"
- return
- fi
-
- if [[ "$weight_rp12" -gt "$weight_rp13" ]]; then
- weights_ratio=$(echo "scale=2; $weight_rp12 / $weight_rp13" \
- | bc -l)
- packets_ratio=$(echo "scale=2; $packets_rp12 / $packets_rp13" \
- | bc -l)
- else
- weights_ratio=$(echo "scale=2; $weight_rp13 / $weight_rp12" | \
- bc -l)
- packets_ratio=$(echo "scale=2; $packets_rp13 / $packets_rp12" | \
- bc -l)
- fi
-
- diff=$(echo $weights_ratio - $packets_ratio | bc -l)
- diff=${diff#-}
-
- test "$(echo "$diff / $weights_ratio > 0.15" | bc -l)" -eq 0
- check_err $? "Too large discrepancy between expected and measured ratios"
- log_test "$desc"
- log_info "Expected ratio $weights_ratio Measured ratio $packets_ratio"
-}
-
multipath4_test()
{
local desc="$1"
diff --git a/tools/testing/selftests/net/forwarding/tc_chains.sh b/tools/testing/selftests/net/forwarding/tc_chains.sh
index d2c783e94df3..2934fb5ed2a2 100755
--- a/tools/testing/selftests/net/forwarding/tc_chains.sh
+++ b/tools/testing/selftests/net/forwarding/tc_chains.sh
@@ -1,7 +1,8 @@
#!/bin/bash
# SPDX-License-Identifier: GPL-2.0
-ALL_TESTS="unreachable_chain_test gact_goto_chain_test"
+ALL_TESTS="unreachable_chain_test gact_goto_chain_test create_destroy_chain \
+ template_filter_fits"
NUM_NETIFS=2
source tc_common.sh
source lib.sh
@@ -80,6 +81,87 @@ gact_goto_chain_test()
log_test "gact goto chain ($tcflags)"
}
+create_destroy_chain()
+{
+ RET=0
+
+ tc chain add dev $h2 ingress
+ check_err $? "Failed to create default chain"
+
+ output="$(tc -j chain get dev $h2 ingress)"
+ check_err $? "Failed to get default chain"
+
+ echo $output | jq -e ".[] | select(.chain == 0)" &> /dev/null
+ check_err $? "Unexpected output for default chain"
+
+ tc chain add dev $h2 ingress chain 1
+ check_err $? "Failed to create chain 1"
+
+ output="$(tc -j chain get dev $h2 ingress chain 1)"
+ check_err $? "Failed to get chain 1"
+
+ echo $output | jq -e ".[] | select(.chain == 1)" &> /dev/null
+ check_err $? "Unexpected output for chain 1"
+
+ output="$(tc -j chain show dev $h2 ingress)"
+ check_err $? "Failed to dump chains"
+
+ echo $output | jq -e ".[] | select(.chain == 0)" &> /dev/null
+ check_err $? "Can't find default chain in dump"
+
+ echo $output | jq -e ".[] | select(.chain == 1)" &> /dev/null
+ check_err $? "Can't find chain 1 in dump"
+
+ tc chain del dev $h2 ingress
+ check_err $? "Failed to destroy default chain"
+
+ tc chain del dev $h2 ingress chain 1
+ check_err $? "Failed to destroy chain 1"
+
+ log_test "create destroy chain"
+}
+
+template_filter_fits()
+{
+ RET=0
+
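+ # Set up chain templates: the default chain keyed on dst_mac, chain 1
+ # keyed on src_mac. Only filters whose keys fit the template of their
+ # chain should be accepted.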
+ tc chain add dev $h2 ingress protocol ip \
+ flower dst_mac 00:00:00:00:00:00/FF:FF:FF:FF:FF:FF &> /dev/null
+ tc chain add dev $h2 ingress chain 1 protocol ip \
+ flower src_mac 00:00:00:00:00:00/FF:FF:FF:FF:FF:FF &> /dev/null
+
+ tc filter add dev $h2 ingress protocol ip pref 1 handle 1101 \
+ flower dst_mac $h2mac action drop
+ check_err $? "Failed to insert filter which fits template"
+
+ tc filter add dev $h2 ingress protocol ip pref 1 handle 1102 \
+ flower src_mac $h2mac action drop &> /dev/null
+ check_fail $? "Incorrectly succeded to insert filter which does not template"
+
+ tc filter add dev $h2 ingress chain 1 protocol ip pref 1 handle 1101 \
+ flower src_mac $h2mac action drop
+ check_err $? "Failed to insert filter which fits template"
+
+ tc filter add dev $h2 ingress chain 1 protocol ip pref 1 handle 1102 \
+ flower dst_mac $h2mac action drop &> /dev/null
+ check_fail $? "Incorrectly succeded to insert filter which does not template"
+
+ tc filter del dev $h2 ingress chain 1 protocol ip pref 1 handle 1102 \
+ flower &> /dev/null
+ tc filter del dev $h2 ingress chain 1 protocol ip pref 1 handle 1101 \
+ flower &> /dev/null
+
+ tc filter del dev $h2 ingress protocol ip pref 1 handle 1102 \
+ flower &> /dev/null
+ tc filter del dev $h2 ingress protocol ip pref 1 handle 1101 \
+ flower &> /dev/null
+
+ tc chain del dev $h2 ingress chain 1
+ tc chain del dev $h2 ingress
+
+ log_test "template filter fits"
+}
+
setup_prepare()
{
h1=${NETIFS[p1]}
@@ -103,6 +185,8 @@ cleanup()
vrf_cleanup
}
+check_tc_chain_support
+
trap cleanup EXIT
setup_prepare
diff --git a/tools/testing/selftests/net/forwarding/tc_shblocks.sh b/tools/testing/selftests/net/forwarding/tc_shblocks.sh
index b5b917203815..9826a446e2c0 100755
--- a/tools/testing/selftests/net/forwarding/tc_shblocks.sh
+++ b/tools/testing/selftests/net/forwarding/tc_shblocks.sh
@@ -105,6 +105,8 @@ cleanup()
ip link set $swp2 address $swp2origmac
}
+check_tc_shblock_support
+
trap cleanup EXIT
setup_prepare
diff --git a/tools/testing/selftests/net/ip6_gre_headroom.sh b/tools/testing/selftests/net/ip6_gre_headroom.sh
new file mode 100755
index 000000000000..5b41e8bb6e2d
--- /dev/null
+++ b/tools/testing/selftests/net/ip6_gre_headroom.sh
@@ -0,0 +1,65 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+#
+# Test that enough headroom is reserved for the first packet passing through an
+# IPv6 GRE-like netdevice.
+
+setup_prepare()
+{
+ ip link add h1 type veth peer name swp1
+ ip link add h3 type veth peer name swp3
+
+ ip link set dev h1 up
+ ip address add 192.0.2.1/28 dev h1
+
+ ip link add dev vh3 type vrf table 20
+ ip link set dev h3 master vh3
+ ip link set dev vh3 up
+ ip link set dev h3 up
+
+ ip link set dev swp3 up
+ ip address add dev swp3 2001:db8:2::1/64
+ ip address add dev swp3 2001:db8:2::3/64
+
+ ip link set dev swp1 up
+ tc qdisc add dev swp1 clsact
+
+ ip link add name er6 type ip6erspan \
+ local 2001:db8:2::1 remote 2001:db8:2::2 oseq okey 123
+ ip link set dev er6 up
+
+ ip link add name gt6 type ip6gretap \
+ local 2001:db8:2::3 remote 2001:db8:2::4
+ ip link set dev gt6 up
+
+ sleep 1
+}
+
+cleanup()
+{
+ ip link del dev gt6
+ ip link del dev er6
+ ip link del dev swp1
+ ip link del dev swp3
+ ip link del dev vh3
+}
+
+test_headroom()
+{
+ local type=$1; shift
+ local tundev=$1; shift
+
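+ # Mirror a single packet from swp1 into the freshly created tunnel. The
+ # interesting part is the first transmit, where insufficient headroom
+ # would show up as a panic.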
+ tc filter add dev swp1 ingress pref 1000 matchall skip_hw \
+ action mirred egress mirror dev $tundev
+ ping -I h1 192.0.2.2 -c 1 -w 2 &> /dev/null
+ tc filter del dev swp1 ingress pref 1000
+
+ # If it doesn't panic, it passes.
+ printf "TEST: %-60s [PASS]\n" "$type headroom"
+}
+
+trap cleanup EXIT
+
+setup_prepare
+test_headroom ip6gretap gt6
+test_headroom ip6erspan er6
diff --git a/tools/testing/selftests/net/rtnetlink.sh b/tools/testing/selftests/net/rtnetlink.sh
index 0d7a44fa30af..08c341b49760 100755
--- a/tools/testing/selftests/net/rtnetlink.sh
+++ b/tools/testing/selftests/net/rtnetlink.sh
@@ -525,18 +525,21 @@ kci_test_macsec()
#-------------------------------------------------------------------
kci_test_ipsec()
{
- srcip="14.0.0.52"
- dstip="14.0.0.70"
+ ret=0
algo="aead rfc4106(gcm(aes)) 0x3132333435363738393031323334353664636261 128"
+ srcip=192.168.123.1
+ dstip=192.168.123.2
+ spi=7
+
+ ip addr add $srcip dev $devdummy
# flush to be sure there's nothing configured
ip x s flush ; ip x p flush
check_err $?
# start the monitor in the background
- tmpfile=`mktemp ipsectestXXX`
- ip x m > $tmpfile &
- mpid=$!
+ tmpfile=`mktemp /var/run/ipsectestXXX`
+ mpid=`(ip x m > $tmpfile & echo $!) 2>/dev/null`
sleep 0.2
ipsecid="proto esp src $srcip dst $dstip spi 0x07"
@@ -599,6 +602,7 @@ kci_test_ipsec()
check_err $?
ip x p flush
check_err $?
+ ip addr del $srcip/32 dev $devdummy
if [ $ret -ne 0 ]; then
echo "FAIL: ipsec"
@@ -607,6 +611,119 @@ kci_test_ipsec()
echo "PASS: ipsec"
}
+#-------------------------------------------------------------------
+# Example commands
+# ip x s add proto esp src 14.0.0.52 dst 14.0.0.70 \
+# spi 0x07 mode transport reqid 0x07 replay-window 32 \
+# aead 'rfc4106(gcm(aes))' 1234567890123456dcba 128 \
+# sel src 14.0.0.52/24 dst 14.0.0.70/24
+# offload dev sim1 dir out
+# ip x p add dir out src 14.0.0.52/24 dst 14.0.0.70/24 \
+# tmpl proto esp src 14.0.0.52 dst 14.0.0.70 \
+# spi 0x07 mode transport reqid 0x07
+#
+#-------------------------------------------------------------------
+kci_test_ipsec_offload()
+{
+ ret=0
+ algo="aead rfc4106(gcm(aes)) 0x3132333435363738393031323334353664636261 128"
+ srcip=192.168.123.3
+ dstip=192.168.123.4
+ dev=simx1
+ sysfsd=/sys/kernel/debug/netdevsim/$dev
+ sysfsf=$sysfsd/ipsec
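+ # netdevsim reports its programmed IPsec state through this debugfs file;
+ # it is compared against the expected contents further down.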
+
+ # setup netdevsim since dummydev doesn't have offload support
+ modprobe netdevsim
+ check_err $?
+ if [ $ret -ne 0 ]; then
+ echo "FAIL: ipsec_offload can't load netdevsim"
+ return 1
+ fi
+
+ ip link add $dev type netdevsim
+ ip addr add $srcip dev $dev
+ ip link set $dev up
+ if [ ! -d $sysfsd ] ; then
+ echo "FAIL: ipsec_offload can't create device $dev"
+ return 1
+ fi
+ if [ ! -f $sysfsf ] ; then
+ echo "FAIL: ipsec_offload netdevsim doesn't support IPsec offload"
+ return 1
+ fi
+
+ # flush to be sure there's nothing configured
+ ip x s flush ; ip x p flush
+
+ # create offloaded SAs, both in and out
+ ip x p add dir out src $srcip/24 dst $dstip/24 \
+ tmpl proto esp src $srcip dst $dstip spi 9 \
+ mode transport reqid 42
+ check_err $?
+ ip x p add dir out src $dstip/24 dst $srcip/24 \
+ tmpl proto esp src $dstip dst $srcip spi 9 \
+ mode transport reqid 42
+ check_err $?
+
+ ip x s add proto esp src $srcip dst $dstip spi 9 \
+ mode transport reqid 42 $algo sel src $srcip/24 dst $dstip/24 \
+ offload dev $dev dir out
+ check_err $?
+ ip x s add proto esp src $dstip dst $srcip spi 9 \
+ mode transport reqid 42 $algo sel src $dstip/24 dst $srcip/24 \
+ offload dev $dev dir in
+ check_err $?
+ if [ $ret -ne 0 ]; then
+ echo "FAIL: ipsec_offload can't create SA"
+ return 1
+ fi
+
+ # does offload show up in ip output
+ lines=`ip x s list | grep -c "crypto offload parameters: dev $dev dir"`
+ if [ $lines -ne 2 ] ; then
+ echo "FAIL: ipsec_offload SA offload missing from list output"
+ check_err 1
+ fi
+
+ # use ping to exercise the Tx path
+ ping -I $dev -c 3 -W 1 -i 0 $dstip >/dev/null
+
+ # does driver have correct offload info
+ diff $sysfsf - << EOF
+SA count=2 tx=3
+sa[0] tx ipaddr=0x00000000 00000000 00000000 00000000
+sa[0] spi=0x00000009 proto=0x32 salt=0x61626364 crypt=1
+sa[0] key=0x34333231 38373635 32313039 36353433
+sa[1] rx ipaddr=0x00000000 00000000 00000000 037ba8c0
+sa[1] spi=0x00000009 proto=0x32 salt=0x61626364 crypt=1
+sa[1] key=0x34333231 38373635 32313039 36353433
+EOF
+ if [ $? -ne 0 ] ; then
+ echo "FAIL: ipsec_offload incorrect driver data"
+ check_err 1
+ fi
+
+ # does offload get removed from driver
+ ip x s flush
+ ip x p flush
+ lines=`grep -c "SA count=0" $sysfsf`
+ if [ $lines -ne 1 ] ; then
+ echo "FAIL: ipsec_offload SA not removed from driver"
+ check_err 1
+ fi
+
+ # clean up any leftovers
+ ip link del $dev
+ rmmod netdevsim
+
+ if [ $ret -ne 0 ]; then
+ echo "FAIL: ipsec_offload"
+ return 1
+ fi
+ echo "PASS: ipsec_offload"
+}
+
kci_test_gretap()
{
testns="testns"
@@ -861,6 +978,7 @@ kci_test_rtnl()
kci_test_encap
kci_test_macsec
kci_test_ipsec
+ kci_test_ipsec_offload
kci_del_dummy
}
diff --git a/tools/testing/selftests/net/tls.c b/tools/testing/selftests/net/tls.c
new file mode 100644
index 000000000000..b3ebf2646e52
--- /dev/null
+++ b/tools/testing/selftests/net/tls.c
@@ -0,0 +1,692 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#define _GNU_SOURCE
+
+#include <arpa/inet.h>
+#include <errno.h>
+#include <error.h>
+#include <fcntl.h>
+#include <poll.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+
+#include <linux/tls.h>
+#include <linux/tcp.h>
+#include <linux/socket.h>
+
+#include <sys/types.h>
+#include <sys/sendfile.h>
+#include <sys/socket.h>
+#include <sys/stat.h>
+
+#include "../kselftest_harness.h"
+
+#define TLS_PAYLOAD_MAX_LEN 16384
+#define SOL_TLS 282
+
+FIXTURE(tls)
+{
+ int fd, cfd;
+ bool notls;
+};
+
+FIXTURE_SETUP(tls)
+{
+ struct tls12_crypto_info_aes_gcm_128 tls12;
+ struct sockaddr_in addr;
+ socklen_t len;
+ int sfd, ret;
+
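+ /* Set up a loopback TCP connection and, if the kernel offers the "tls"
+  * ULP, enable kTLS: TLS_TX on the connecting socket and TLS_RX on the
+  * accepted one. Otherwise fall back to plain TCP and note it in notls.
+  */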
+ self->notls = false;
+ len = sizeof(addr);
+
+ memset(&tls12, 0, sizeof(tls12));
+ tls12.info.version = TLS_1_2_VERSION;
+ tls12.info.cipher_type = TLS_CIPHER_AES_GCM_128;
+
+ addr.sin_family = AF_INET;
+ addr.sin_addr.s_addr = htonl(INADDR_ANY);
+ addr.sin_port = 0;
+
+ self->fd = socket(AF_INET, SOCK_STREAM, 0);
+ sfd = socket(AF_INET, SOCK_STREAM, 0);
+
+ ret = bind(sfd, &addr, sizeof(addr));
+ ASSERT_EQ(ret, 0);
+ ret = listen(sfd, 10);
+ ASSERT_EQ(ret, 0);
+
+ ret = getsockname(sfd, &addr, &len);
+ ASSERT_EQ(ret, 0);
+
+ ret = connect(self->fd, &addr, sizeof(addr));
+ ASSERT_EQ(ret, 0);
+
+ ret = setsockopt(self->fd, IPPROTO_TCP, TCP_ULP, "tls", sizeof("tls"));
+ if (ret != 0) {
+ self->notls = true;
+ printf("Failure setting TCP_ULP, testing without tls\n");
+ }
+
+ if (!self->notls) {
+ ret = setsockopt(self->fd, SOL_TLS, TLS_TX, &tls12,
+ sizeof(tls12));
+ ASSERT_EQ(ret, 0);
+ }
+
+ self->cfd = accept(sfd, &addr, &len);
+ ASSERT_GE(self->cfd, 0);
+
+ if (!self->notls) {
+ ret = setsockopt(self->cfd, IPPROTO_TCP, TCP_ULP, "tls",
+ sizeof("tls"));
+ ASSERT_EQ(ret, 0);
+
+ ret = setsockopt(self->cfd, SOL_TLS, TLS_RX, &tls12,
+ sizeof(tls12));
+ ASSERT_EQ(ret, 0);
+ }
+
+ close(sfd);
+}
+
+FIXTURE_TEARDOWN(tls)
+{
+ close(self->fd);
+ close(self->cfd);
+}
+
+TEST_F(tls, sendfile)
+{
+ int filefd = open("/proc/self/exe", O_RDONLY);
+ struct stat st;
+
+ EXPECT_GE(filefd, 0);
+ fstat(filefd, &st);
+ EXPECT_GE(sendfile(self->fd, filefd, 0, st.st_size), 0);
+}
+
+TEST_F(tls, send_then_sendfile)
+{
+ int filefd = open("/proc/self/exe", O_RDONLY);
+ char const *test_str = "test_send";
+ int to_send = strlen(test_str) + 1;
+ char recv_buf[10];
+ struct stat st;
+ char *buf;
+
+ EXPECT_GE(filefd, 0);
+ fstat(filefd, &st);
+ buf = (char *)malloc(st.st_size);
+
+ EXPECT_EQ(send(self->fd, test_str, to_send, 0), to_send);
+ EXPECT_EQ(recv(self->cfd, recv_buf, to_send, 0), to_send);
+ EXPECT_EQ(memcmp(test_str, recv_buf, to_send), 0);
+
+ EXPECT_GE(sendfile(self->fd, filefd, 0, st.st_size), 0);
+ EXPECT_EQ(recv(self->cfd, buf, st.st_size, 0), st.st_size);
+}
+
+TEST_F(tls, recv_max)
+{
+ unsigned int send_len = TLS_PAYLOAD_MAX_LEN;
+ char recv_mem[TLS_PAYLOAD_MAX_LEN];
+ char buf[TLS_PAYLOAD_MAX_LEN];
+
+ EXPECT_GE(send(self->fd, buf, send_len, 0), 0);
+ EXPECT_NE(recv(self->cfd, recv_mem, send_len, 0), -1);
+ EXPECT_EQ(memcmp(buf, recv_mem, send_len), 0);
+}
+
+TEST_F(tls, recv_small)
+{
+ char const *test_str = "test_read";
+ int send_len = 10;
+ char buf[10];
+
+ send_len = strlen(test_str) + 1;
+ EXPECT_EQ(send(self->fd, test_str, send_len, 0), send_len);
+ EXPECT_NE(recv(self->cfd, buf, send_len, 0), -1);
+ EXPECT_EQ(memcmp(buf, test_str, send_len), 0);
+}
+
+TEST_F(tls, msg_more)
+{
+ char const *test_str = "test_read";
+ int send_len = 10;
+ char buf[10 * 2];
+
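+ /* MSG_MORE holds the data back, so the non-blocking recv sees nothing
+  * until the second send (without MSG_MORE) flushes both chunks.
+  */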
+ EXPECT_EQ(send(self->fd, test_str, send_len, MSG_MORE), send_len);
+ EXPECT_EQ(recv(self->cfd, buf, send_len, MSG_DONTWAIT), -1);
+ EXPECT_EQ(send(self->fd, test_str, send_len, 0), send_len);
+ EXPECT_EQ(recv(self->cfd, buf, send_len * 2, MSG_DONTWAIT),
+ send_len * 2);
+ EXPECT_EQ(memcmp(buf, test_str, send_len), 0);
+}
+
+TEST_F(tls, sendmsg_single)
+{
+ struct msghdr msg;
+
+ char const *test_str = "test_sendmsg";
+ size_t send_len = 13;
+ struct iovec vec;
+ char buf[13];
+
+ vec.iov_base = (char *)test_str;
+ vec.iov_len = send_len;
+ memset(&msg, 0, sizeof(struct msghdr));
+ msg.msg_iov = &vec;
+ msg.msg_iovlen = 1;
+ EXPECT_EQ(sendmsg(self->fd, &msg, 0), send_len);
+ EXPECT_EQ(recv(self->cfd, buf, send_len, 0), send_len);
+ EXPECT_EQ(memcmp(buf, test_str, send_len), 0);
+}
+
+TEST_F(tls, sendmsg_large)
+{
+ void *mem = malloc(16384);
+ size_t send_len = 16384;
+ size_t sends = 128;
+ struct msghdr msg;
+ size_t recvs = 0;
+ size_t sent = 0;
+
+ memset(&msg, 0, sizeof(struct msghdr));
+ while (sent++ < sends) {
+ struct iovec vec = { (void *)mem, send_len };
+
+ msg.msg_iov = &vec;
+ msg.msg_iovlen = 1;
+ EXPECT_EQ(sendmsg(self->cfd, &msg, 0), send_len);
+ }
+
+ while (recvs++ < sends)
+ EXPECT_NE(recv(self->fd, mem, send_len, 0), -1);
+
+ free(mem);
+}
+
+TEST_F(tls, sendmsg_multiple)
+{
+ char const *test_str = "test_sendmsg_multiple";
+ struct iovec vec[5];
+ char *test_strs[5];
+ struct msghdr msg;
+ int total_len = 0;
+ int len_cmp = 0;
+ int iov_len = 5;
+ char *buf;
+ int i;
+
+ memset(&msg, 0, sizeof(struct msghdr));
+ for (i = 0; i < iov_len; i++) {
+ test_strs[i] = (char *)malloc(strlen(test_str) + 1);
+ snprintf(test_strs[i], strlen(test_str) + 1, "%s", test_str);
+ vec[i].iov_base = (void *)test_strs[i];
+ vec[i].iov_len = strlen(test_strs[i]) + 1;
+ total_len += vec[i].iov_len;
+ }
+ msg.msg_iov = vec;
+ msg.msg_iovlen = iov_len;
+
+ EXPECT_EQ(sendmsg(self->cfd, &msg, 0), total_len);
+ buf = malloc(total_len);
+ EXPECT_NE(recv(self->fd, buf, total_len, 0), -1);
+ for (i = 0; i < iov_len; i++) {
+ EXPECT_EQ(memcmp(test_strs[i], buf + len_cmp,
+ strlen(test_strs[i])),
+ 0);
+ len_cmp += strlen(buf + len_cmp) + 1;
+ }
+ for (i = 0; i < iov_len; i++)
+ free(test_strs[i]);
+ free(buf);
+}
+
+TEST_F(tls, sendmsg_multiple_stress)
+{
+ char const *test_str = "abcdefghijklmno";
+ struct iovec vec[1024];
+ char *test_strs[1024];
+ int iov_len = 1024;
+ int total_len = 0;
+ char buf[1 << 14];
+ struct msghdr msg;
+ int len_cmp = 0;
+ int i;
+
+ memset(&msg, 0, sizeof(struct msghdr));
+ for (i = 0; i < iov_len; i++) {
+ test_strs[i] = (char *)malloc(strlen(test_str) + 1);
+ snprintf(test_strs[i], strlen(test_str) + 1, "%s", test_str);
+ vec[i].iov_base = (void *)test_strs[i];
+ vec[i].iov_len = strlen(test_strs[i]) + 1;
+ total_len += vec[i].iov_len;
+ }
+ msg.msg_iov = vec;
+ msg.msg_iovlen = iov_len;
+
+ EXPECT_EQ(sendmsg(self->fd, &msg, 0), total_len);
+ EXPECT_NE(recv(self->cfd, buf, total_len, 0), -1);
+
+ for (i = 0; i < iov_len; i++)
+ len_cmp += strlen(buf + len_cmp) + 1;
+
+ for (i = 0; i < iov_len; i++)
+ free(test_strs[i]);
+}
+
+TEST_F(tls, splice_from_pipe)
+{
+ int send_len = TLS_PAYLOAD_MAX_LEN;
+ char mem_send[TLS_PAYLOAD_MAX_LEN];
+ char mem_recv[TLS_PAYLOAD_MAX_LEN];
+ int p[2];
+
+ ASSERT_GE(pipe(p), 0);
+ EXPECT_GE(write(p[1], mem_send, send_len), 0);
+ EXPECT_GE(splice(p[0], NULL, self->fd, NULL, send_len, 0), 0);
+ EXPECT_GE(recv(self->cfd, mem_recv, send_len, 0), 0);
+ EXPECT_EQ(memcmp(mem_send, mem_recv, send_len), 0);
+}
+
+TEST_F(tls, splice_from_pipe2)
+{
+ int send_len = 16000;
+ char mem_send[16000];
+ char mem_recv[16000];
+ int p2[2];
+ int p[2];
+
+ ASSERT_GE(pipe(p), 0);
+ ASSERT_GE(pipe(p2), 0);
+ EXPECT_GE(write(p[1], mem_send, 8000), 0);
+ EXPECT_GE(splice(p[0], NULL, self->fd, NULL, 8000, 0), 0);
+ EXPECT_GE(write(p2[1], mem_send + 8000, 8000), 0);
+ EXPECT_GE(splice(p2[0], NULL, self->fd, NULL, 8000, 0), 0);
+ EXPECT_GE(recv(self->cfd, mem_recv, send_len, 0), 0);
+ EXPECT_EQ(memcmp(mem_send, mem_recv, send_len), 0);
+}
+
+TEST_F(tls, send_and_splice)
+{
+ int send_len = TLS_PAYLOAD_MAX_LEN;
+ char mem_send[TLS_PAYLOAD_MAX_LEN];
+ char mem_recv[TLS_PAYLOAD_MAX_LEN];
+ char const *test_str = "test_read";
+ int send_len2 = 10;
+ char buf[10];
+ int p[2];
+
+ ASSERT_GE(pipe(p), 0);
+ EXPECT_EQ(send(self->fd, test_str, send_len2, 0), send_len2);
+ EXPECT_NE(recv(self->cfd, buf, send_len2, 0), -1);
+ EXPECT_EQ(memcmp(test_str, buf, send_len2), 0);
+
+ EXPECT_GE(write(p[1], mem_send, send_len), send_len);
+ EXPECT_GE(splice(p[0], NULL, self->fd, NULL, send_len, 0), send_len);
+
+ EXPECT_GE(recv(self->cfd, mem_recv, send_len, 0), 0);
+ EXPECT_EQ(memcmp(mem_send, mem_recv, send_len), 0);
+}
+
+TEST_F(tls, splice_to_pipe)
+{
+ int send_len = TLS_PAYLOAD_MAX_LEN;
+ char mem_send[TLS_PAYLOAD_MAX_LEN];
+ char mem_recv[TLS_PAYLOAD_MAX_LEN];
+ int p[2];
+
+ ASSERT_GE(pipe(p), 0);
+ EXPECT_GE(send(self->fd, mem_send, send_len, 0), 0);
+ EXPECT_GE(splice(self->cfd, NULL, p[1], NULL, send_len, 0), 0);
+ EXPECT_GE(read(p[0], mem_recv, send_len), 0);
+ EXPECT_EQ(memcmp(mem_send, mem_recv, send_len), 0);
+}
+
+TEST_F(tls, recvmsg_single)
+{
+ char const *test_str = "test_recvmsg_single";
+ int send_len = strlen(test_str) + 1;
+ char buf[20];
+ struct msghdr hdr;
+ struct iovec vec;
+
+ memset(&hdr, 0, sizeof(hdr));
+ EXPECT_EQ(send(self->fd, test_str, send_len, 0), send_len);
+ vec.iov_base = (char *)buf;
+ vec.iov_len = send_len;
+ hdr.msg_iovlen = 1;
+ hdr.msg_iov = &vec;
+ EXPECT_NE(recvmsg(self->cfd, &hdr, 0), -1);
+ EXPECT_EQ(memcmp(test_str, buf, send_len), 0);
+}
+
+TEST_F(tls, recvmsg_single_max)
+{
+ int send_len = TLS_PAYLOAD_MAX_LEN;
+ char send_mem[TLS_PAYLOAD_MAX_LEN];
+ char recv_mem[TLS_PAYLOAD_MAX_LEN];
+ struct iovec vec;
+ struct msghdr hdr;
+
+ EXPECT_EQ(send(self->fd, send_mem, send_len, 0), send_len);
+ vec.iov_base = (char *)recv_mem;
+ vec.iov_len = TLS_PAYLOAD_MAX_LEN;
+
+ hdr.msg_iovlen = 1;
+ hdr.msg_iov = &vec;
+ EXPECT_NE(recvmsg(self->cfd, &hdr, 0), -1);
+ EXPECT_EQ(memcmp(send_mem, recv_mem, send_len), 0);
+}
+
+TEST_F(tls, recvmsg_multiple)
+{
+ unsigned int msg_iovlen = 1024;
+ unsigned int len_compared = 0;
+ struct iovec vec[1024];
+ char *iov_base[1024];
+ unsigned int iov_len = 16;
+ int send_len = 1 << 14;
+ char buf[1 << 14];
+ struct msghdr hdr;
+ int i;
+
+ EXPECT_EQ(send(self->fd, buf, send_len, 0), send_len);
+ for (i = 0; i < msg_iovlen; i++) {
+ iov_base[i] = (char *)malloc(iov_len);
+ vec[i].iov_base = iov_base[i];
+ vec[i].iov_len = iov_len;
+ }
+
+ hdr.msg_iovlen = msg_iovlen;
+ hdr.msg_iov = vec;
+ EXPECT_NE(recvmsg(self->cfd, &hdr, 0), -1);
+ for (i = 0; i < msg_iovlen; i++)
+ len_compared += iov_len;
+
+ for (i = 0; i < msg_iovlen; i++)
+ free(iov_base[i]);
+}
+
+TEST_F(tls, single_send_multiple_recv)
+{
+ unsigned int total_len = TLS_PAYLOAD_MAX_LEN * 2;
+ unsigned int send_len = TLS_PAYLOAD_MAX_LEN;
+ char send_mem[TLS_PAYLOAD_MAX_LEN * 2];
+ char recv_mem[TLS_PAYLOAD_MAX_LEN * 2];
+
+ EXPECT_GE(send(self->fd, send_mem, total_len, 0), 0);
+ memset(recv_mem, 0, total_len);
+
+ EXPECT_NE(recv(self->cfd, recv_mem, send_len, 0), -1);
+ EXPECT_NE(recv(self->cfd, recv_mem + send_len, send_len, 0), -1);
+ EXPECT_EQ(memcmp(send_mem, recv_mem, total_len), 0);
+}
+
+TEST_F(tls, multiple_send_single_recv)
+{
+ unsigned int total_len = 2 * 10;
+ unsigned int send_len = 10;
+ char recv_mem[2 * 10];
+ char send_mem[10];
+
+ EXPECT_GE(send(self->fd, send_mem, send_len, 0), 0);
+ EXPECT_GE(send(self->fd, send_mem, send_len, 0), 0);
+ memset(recv_mem, 0, total_len);
+ EXPECT_EQ(recv(self->cfd, recv_mem, total_len, 0), total_len);
+
+ EXPECT_EQ(memcmp(send_mem, recv_mem, send_len), 0);
+ EXPECT_EQ(memcmp(send_mem, recv_mem + send_len, send_len), 0);
+}
+
+TEST_F(tls, recv_partial)
+{
+ char const *test_str = "test_read_partial";
+ char const *test_str_first = "test_read";
+ char const *test_str_second = "_partial";
+ int send_len = strlen(test_str) + 1;
+ char recv_mem[18];
+
+ memset(recv_mem, 0, sizeof(recv_mem));
+ EXPECT_EQ(send(self->fd, test_str, send_len, 0), send_len);
+ EXPECT_NE(recv(self->cfd, recv_mem, strlen(test_str_first), 0), -1);
+ EXPECT_EQ(memcmp(test_str_first, recv_mem, strlen(test_str_first)), 0);
+ memset(recv_mem, 0, sizeof(recv_mem));
+ EXPECT_NE(recv(self->cfd, recv_mem, strlen(test_str_second), 0), -1);
+ EXPECT_EQ(memcmp(test_str_second, recv_mem, strlen(test_str_second)),
+ 0);
+}
+
+TEST_F(tls, recv_nonblock)
+{
+ char buf[4096];
+ bool err;
+
+ EXPECT_EQ(recv(self->cfd, buf, sizeof(buf), MSG_DONTWAIT), -1);
+ err = (errno == EAGAIN || errno == EWOULDBLOCK);
+ EXPECT_EQ(err, true);
+}
+
+TEST_F(tls, recv_peek)
+{
+ char const *test_str = "test_read_peek";
+ int send_len = strlen(test_str) + 1;
+ char buf[15];
+
+ EXPECT_EQ(send(self->fd, test_str, send_len, 0), send_len);
+ EXPECT_NE(recv(self->cfd, buf, send_len, MSG_PEEK), -1);
+ EXPECT_EQ(memcmp(test_str, buf, send_len), 0);
+ memset(buf, 0, sizeof(buf));
+ EXPECT_NE(recv(self->cfd, buf, send_len, 0), -1);
+ EXPECT_EQ(memcmp(test_str, buf, send_len), 0);
+}
+
+TEST_F(tls, recv_peek_multiple)
+{
+ char const *test_str = "test_read_peek";
+ int send_len = strlen(test_str) + 1;
+ unsigned int num_peeks = 100;
+ char buf[15];
+ int i;
+
+ EXPECT_EQ(send(self->fd, test_str, send_len, 0), send_len);
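+ /* MSG_PEEK does not consume the record, so each peek sees the same data. */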
+ for (i = 0; i < num_peeks; i++) {
+ EXPECT_NE(recv(self->cfd, buf, send_len, MSG_PEEK), -1);
+ EXPECT_EQ(memcmp(test_str, buf, send_len), 0);
+ memset(buf, 0, sizeof(buf));
+ }
+ EXPECT_NE(recv(self->cfd, buf, send_len, 0), -1);
+ EXPECT_EQ(memcmp(test_str, buf, send_len), 0);
+}
+
+TEST_F(tls, pollin)
+{
+ char const *test_str = "test_poll";
+ struct pollfd fd = { 0, 0, 0 };
+ char buf[10];
+ int send_len = 10;
+
+ EXPECT_EQ(send(self->fd, test_str, send_len, 0), send_len);
+ fd.fd = self->cfd;
+ fd.events = POLLIN;
+
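+ /* Data is already queued, so poll() should report POLLIN within the timeout. */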
+ EXPECT_EQ(poll(&fd, 1, 20), 1);
+ EXPECT_EQ(fd.revents & POLLIN, 1);
+ EXPECT_EQ(recv(self->cfd, buf, send_len, 0), send_len);
+ /* Test timing out */
+ EXPECT_EQ(poll(&fd, 1, 20), 0);
+}
+
+TEST_F(tls, poll_wait)
+{
+ char const *test_str = "test_poll_wait";
+ int send_len = strlen(test_str) + 1;
+ struct pollfd fd = { 0, 0, 0 };
+ char recv_mem[15];
+
+ fd.fd = self->cfd;
+ fd.events = POLLIN;
+ EXPECT_EQ(send(self->fd, test_str, send_len, 0), send_len);
+ /* Negative timeout: block until data is available */
+ EXPECT_EQ(poll(&fd, 1, -1), 1);
+ EXPECT_EQ(fd.revents & POLLIN, 1);
+ EXPECT_EQ(recv(self->cfd, recv_mem, send_len, 0), send_len);
+}
+
+TEST_F(tls, blocking)
+{
+ size_t data = 100000;
+ int res = fork();
+
+ EXPECT_NE(res, -1);
+
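+ /* Parent sends 100000 bytes in chunks of at most 16 KiB; the child reads them. */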
+ if (res) {
+ /* parent */
+ size_t left = data;
+ char buf[16384];
+ int status;
+ int pid2;
+
+ while (left) {
+ int res = send(self->fd, buf,
+ left > 16384 ? 16384 : left, 0);
+
+ EXPECT_GE(res, 0);
+ left -= res;
+ }
+
+ pid2 = wait(&status);
+ EXPECT_EQ(status, 0);
+ EXPECT_EQ(res, pid2);
+ } else {
+ /* child */
+ size_t left = data;
+ char buf[16384];
+
+ while (left) {
+ int res = recv(self->cfd, buf,
+ left > 16384 ? 16384 : left, 0);
+
+ EXPECT_GE(res, 0);
+ left -= res;
+ }
+ }
+}
+
+TEST_F(tls, nonblocking)
+{
+ size_t data = 100000;
+ int sendbuf = 100;
+ int flags;
+ int res;
+
+ flags = fcntl(self->fd, F_GETFL, 0);
+ fcntl(self->fd, F_SETFL, flags | O_NONBLOCK);
+ fcntl(self->cfd, F_SETFL, flags | O_NONBLOCK);
+
+ /* Ensure nonblocking behavior by imposing a small send
+ * buffer.
+ */
+ EXPECT_EQ(setsockopt(self->fd, SOL_SOCKET, SO_SNDBUF,
+ &sendbuf, sizeof(sendbuf)), 0);
+
+ res = fork();
+ EXPECT_NE(res, -1);
+
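+ /* Parent sends and child receives; both are expected to hit EAGAIN at least once. */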
+ if (res) {
+ /* parent */
+ bool eagain = false;
+ size_t left = data;
+ char buf[16384];
+ int status;
+ int pid2;
+
+ while (left) {
+ int res = send(self->fd, buf,
+ left > 16384 ? 16384 : left, 0);
+
+ if (res == -1 && errno == EAGAIN) {
+ eagain = true;
+ usleep(10000);
+ continue;
+ }
+ EXPECT_GE(res, 0);
+ left -= res;
+ }
+
+ EXPECT_TRUE(eagain);
+ pid2 = wait(&status);
+
+ EXPECT_EQ(status, 0);
+ EXPECT_EQ(res, pid2);
+ } else {
+ /* child */
+ bool eagain = false;
+ size_t left = data;
+ char buf[16384];
+
+ while (left) {
+ int res = recv(self->cfd, buf,
+ left > 16384 ? 16384 : left, 0);
+
+ if (res == -1 && errno == EAGAIN) {
+ eagain = true;
+ usleep(10000);
+ continue;
+ }
+ EXPECT_GE(res, 0);
+ left -= res;
+ }
+ EXPECT_TRUE(eagain);
+ }
+}
+
+TEST_F(tls, control_msg)
+{
+ if (self->notls)
+ return;
+
+ char cbuf[CMSG_SPACE(sizeof(char))];
+ char const *test_str = "test_read";
+ int cmsg_len = sizeof(char);
+ char record_type = 100;
+ struct cmsghdr *cmsg;
+ struct msghdr msg;
+ int send_len = 10;
+ struct iovec vec;
+ char buf[10];
+
+ vec.iov_base = (char *)test_str;
+ vec.iov_len = 10;
+ memset(&msg, 0, sizeof(struct msghdr));
+ msg.msg_iov = &vec;
+ msg.msg_iovlen = 1;
+ msg.msg_control = cbuf;
+ msg.msg_controllen = sizeof(cbuf);
+ cmsg = CMSG_FIRSTHDR(&msg);
+ cmsg->cmsg_level = SOL_TLS;
+ /* Test sending a non-data record type. */
+ cmsg->cmsg_type = TLS_SET_RECORD_TYPE;
+ cmsg->cmsg_len = CMSG_LEN(cmsg_len);
+ *CMSG_DATA(cmsg) = record_type;
+ msg.msg_controllen = cmsg->cmsg_len;
+
+ EXPECT_EQ(sendmsg(self->fd, &msg, 0), send_len);
+ /* Should fail because we didn't provide a control message */
+ EXPECT_EQ(recv(self->cfd, buf, send_len, 0), -1);
+
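+ /* Retry with a control buffer; the record type is returned via a TLS_GET_RECORD_TYPE cmsg. */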
+ vec.iov_base = buf;
+ EXPECT_EQ(recvmsg(self->cfd, &msg, 0), send_len);
+ cmsg = CMSG_FIRSTHDR(&msg);
+ EXPECT_NE(cmsg, NULL);
+ EXPECT_EQ(cmsg->cmsg_level, SOL_TLS);
+ EXPECT_EQ(cmsg->cmsg_type, TLS_GET_RECORD_TYPE);
+ record_type = *((unsigned char *)CMSG_DATA(cmsg));
+ EXPECT_EQ(record_type, 100);
+ EXPECT_EQ(memcmp(buf, test_str, send_len), 0);
+}
+
+TEST_HARNESS_MAIN
diff --git a/tools/testing/selftests/tc-testing/tc-tests/actions/csum.json b/tools/testing/selftests/tc-testing/tc-tests/actions/csum.json
index 3a2f51fc7fd4..a022792d392a 100644
--- a/tools/testing/selftests/tc-testing/tc-tests/actions/csum.json
+++ b/tools/testing/selftests/tc-testing/tc-tests/actions/csum.json
@@ -336,6 +336,30 @@
]
},
{
+ "id": "b10b",
+ "name": "Add all 7 csum actions",
+ "category": [
+ "actions",
+ "csum"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action csum",
+ 0,
+ 1,
+ 255
+ ]
+ ],
+ "cmdUnderTest": "$TC actions add action csum icmp ip4h sctp igmp udplite udp tcp index 7",
+ "expExitCode": "0",
+ "verifyCmd": "$TC actions get action csum index 7",
+ "matchPattern": "action order [0-9]*: csum \\(iph, icmp, igmp, tcp, udp, udplite, sctp\\).*index 7 ref",
+ "matchCount": "1",
+ "teardown": [
+ "$TC actions flush action csum"
+ ]
+ },
+ {
"id": "ce92",
"name": "Add csum udp action with cookie",
"category": [
diff --git a/tools/testing/selftests/tc-testing/tc-tests/actions/tunnel_key.json b/tools/testing/selftests/tc-testing/tc-tests/actions/tunnel_key.json
new file mode 100644
index 000000000000..10b2d894e436
--- /dev/null
+++ b/tools/testing/selftests/tc-testing/tc-tests/actions/tunnel_key.json
@@ -0,0 +1,917 @@
+[
+ {
+ "id": "2b11",
+ "name": "Add tunnel_key set action with mandatory parameters",
+ "category": [
+ "actions",
+ "tunnel_key"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action tunnel_key",
+ 0,
+ 1,
+ 255
+ ]
+ ],
+ "cmdUnderTest": "$TC actions add action tunnel_key set src_ip 10.10.10.1 dst_ip 20.20.20.2 id 1",
+ "expExitCode": "0",
+ "verifyCmd": "$TC actions list action tunnel_key",
+ "matchPattern": "action order [0-9]+: tunnel_key.*set.*src_ip 10.10.10.1.*dst_ip 20.20.20.2.*key_id 1",
+ "matchCount": "1",
+ "teardown": [
+ "$TC actions flush action tunnel_key"
+ ]
+ },
+ {
+ "id": "dc6b",
+ "name": "Add tunnel_key set action with missing mandatory src_ip parameter",
+ "category": [
+ "actions",
+ "tunnel_key"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action tunnel_key",
+ 0,
+ 1,
+ 255
+ ]
+ ],
+ "cmdUnderTest": "$TC actions add action tunnel_key set dst_ip 20.20.20.2 id 100",
+ "expExitCode": "255",
+ "verifyCmd": "$TC actions list action tunnel_key",
+ "matchPattern": "action order [0-9]+: tunnel_key set.*dst_ip 20.20.20.2.*key_id 100",
+ "matchCount": "0",
+ "teardown": [
+ [
+ "$TC actions flush action tunnel_key",
+ 0,
+ 1,
+ 255
+ ]
+ ]
+ },
+ {
+ "id": "7f25",
+ "name": "Add tunnel_key set action with missing mandatory dst_ip parameter",
+ "category": [
+ "actions",
+ "tunnel_key"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action tunnel_key",
+ 0,
+ 1,
+ 255
+ ]
+ ],
+ "cmdUnderTest": "$TC actions add action tunnel_key set src_ip 10.10.10.1 id 100",
+ "expExitCode": "255",
+ "verifyCmd": "$TC actions list action tunnel_key",
+ "matchPattern": "action order [0-9]+: tunnel_key set.*src_ip 10.10.10.1.*key_id 100",
+ "matchCount": "0",
+ "teardown": [
+ [
+ "$TC actions flush action tunnel_key",
+ 0,
+ 1,
+ 255
+ ]
+ ]
+ },
+ {
+ "id": "ba4e",
+ "name": "Add tunnel_key set action with missing mandatory id parameter",
+ "category": [
+ "actions",
+ "tunnel_key"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action tunnel_key",
+ 0,
+ 1,
+ 255
+ ]
+ ],
+ "cmdUnderTest": "$TC actions add action tunnel_key set src_ip 10.10.10.1 dst_ip 20.20.20.2",
+ "expExitCode": "255",
+ "verifyCmd": "$TC actions list action tunnel_key",
+ "matchPattern": "action order [0-9]+: tunnel_key set.*src_ip 10.10.10.1.*dst_ip 20.20.20.2",
+ "matchCount": "0",
+ "teardown": [
+ [
+ "$TC actions flush action tunnel_key",
+ 0,
+ 1,
+ 255
+ ]
+ ]
+ },
+ {
+ "id": "a5e0",
+ "name": "Add tunnel_key set action with invalid src_ip parameter",
+ "category": [
+ "actions",
+ "tunnel_key"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action tunnel_key",
+ 0,
+ 1,
+ 255
+ ]
+ ],
+ "cmdUnderTest": "$TC actions add action tunnel_key set src_ip 300.168.100.1 dst_ip 192.168.200.1 id 7 index 1",
+ "expExitCode": "1",
+ "verifyCmd": "$TC actions get action tunnel_key index 1",
+ "matchPattern": "action order [0-9]+: tunnel_key set.*src_ip 300.168.100.1.*dst_ip 192.168.200.1.*key_id 7.*index 1 ref",
+ "matchCount": "0",
+ "teardown": [
+ [
+ "$TC actions flush action tunnel_key",
+ 0,
+ 1,
+ 255
+ ]
+ ]
+ },
+ {
+ "id": "eaa8",
+ "name": "Add tunnel_key set action with invalid dst_ip parameter",
+ "category": [
+ "actions",
+ "tunnel_key"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action tunnel_key",
+ 0,
+ 1,
+ 255
+ ]
+ ],
+ "cmdUnderTest": "$TC actions add action tunnel_key set src_ip 192.168.100.1 dst_ip 192.168.800.1 id 10 index 11",
+ "expExitCode": "1",
+ "verifyCmd": "$TC actions get action tunnel_key index 11",
+ "matchPattern": "action order [0-9]+: tunnel_key set.*src_ip 192.168.100.1.*dst_ip 192.168.800.1.*key_id 10.*index 11 ref",
+ "matchCount": "0",
+ "teardown": [
+ [
+ "$TC actions flush action tunnel_key",
+ 0,
+ 1,
+ 255
+ ]
+ ]
+ },
+ {
+ "id": "3b09",
+ "name": "Add tunnel_key set action with invalid id parameter",
+ "category": [
+ "actions",
+ "tunnel_key"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action tunnel_key",
+ 0,
+ 1,
+ 255
+ ]
+ ],
+ "cmdUnderTest": "$TC actions add action tunnel_key set src_ip 1.1.1.1 dst_ip 2.2.2.2 id 112233445566778899 index 1",
+ "expExitCode": "255",
+ "verifyCmd": "$TC actions get action tunnel_key index 1",
+ "matchPattern": "action order [0-9]+: tunnel_key set.*src_ip 1.1.1.1.*dst_ip 2.2.2.2.*key_id 112233445566778899.*index 1 ref",
+ "matchCount": "0",
+ "teardown": [
+ [
+ "$TC actions flush action tunnel_key",
+ 0,
+ 1,
+ 255
+ ]
+ ]
+ },
+ {
+ "id": "9625",
+ "name": "Add tunnel_key set action with invalid dst_port parameter",
+ "category": [
+ "actions",
+ "tunnel_key"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action tunnel_key",
+ 0,
+ 1,
+ 255
+ ]
+ ],
+ "cmdUnderTest": "$TC actions add action tunnel_key set src_ip 1.1.1.1 dst_ip 2.2.2.2 id 11 dst_port 998877 index 1",
+ "expExitCode": "255",
+ "verifyCmd": "$TC actions get action tunnel_key index 1",
+ "matchPattern": "action order [0-9]+: tunnel_key set.*src_ip 1.1.1.1.*dst_ip 2.2.2.2.*key_id 11.*dst_port 998877.*index 1 ref",
+ "matchCount": "0",
+ "teardown": [
+ [
+ "$TC actions flush action tunnel_key",
+ 0,
+ 1,
+ 255
+ ]
+ ]
+ },
+ {
+ "id": "05af",
+ "name": "Add tunnel_key set action with optional dst_port parameter",
+ "category": [
+ "actions",
+ "tunnel_key"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action tunnel_key",
+ 0,
+ 1,
+ 255
+ ]
+ ],
+ "cmdUnderTest": "$TC actions add action tunnel_key set src_ip 192.168.100.1 dst_ip 192.168.200.1 id 789 dst_port 4000 index 10",
+ "expExitCode": "0",
+ "verifyCmd": "$TC actions get action tunnel_key index 10",
+ "matchPattern": "action order [0-9]+: tunnel_key.*set.*src_ip 192.168.100.1.*dst_ip 192.168.200.1.*key_id 789.*dst_port 4000.*index 10 ref",
+ "matchCount": "1",
+ "teardown": [
+ "$TC actions flush action tunnel_key"
+ ]
+ },
+ {
+ "id": "da80",
+ "name": "Add tunnel_key set action with index at 32-bit maximum",
+ "category": [
+ "actions",
+ "tunnel_key"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action tunnel_key",
+ 0,
+ 1,
+ 255
+ ]
+ ],
+ "cmdUnderTest": "$TC actions add action tunnel_key set src_ip 1.1.1.1 dst_ip 2.2.2.2 id 11 index 4294967295",
+ "expExitCode": "0",
+ "verifyCmd": "$TC actions get action tunnel_key index 4294967295",
+ "matchPattern": "action order [0-9]+: tunnel_key.*set.*src_ip 1.1.1.1.*dst_ip 2.2.2.2.*id 11.*index 4294967295 ref",
+ "matchCount": "1",
+ "teardown": [
+ "$TC actions flush action tunnel_key"
+ ]
+ },
+ {
+ "id": "d407",
+ "name": "Add tunnel_key set action with index exceeding 32-bit maximum",
+ "category": [
+ "actions",
+ "tunnel_key"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action tunnel_key",
+ 0,
+ 1,
+ 255
+ ]
+ ],
+ "cmdUnderTest": "$TC actions add action tunnel_key set src_ip 1.1.1.1 dst_ip 2.2.2.2 id 11 index 4294967295678",
+ "expExitCode": "255",
+ "verifyCmd": "$TC actions get action tunnel_key index 4294967295678",
+ "matchPattern": "action order [0-9]+: tunnel_key set.*index 4294967295678 ref",
+ "matchCount": "0",
+ "teardown": [
+ [
+ "$TC actions flush action tunnel_key",
+ 0,
+ 1,
+ 255
+ ]
+ ]
+ },
+ {
+ "id": "5cba",
+ "name": "Add tunnel_key set action with id value at 32-bit maximum",
+ "category": [
+ "actions",
+ "tunnel_key"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action tunnel_key",
+ 0,
+ 1,
+ 255
+ ]
+ ],
+ "cmdUnderTest": "$TC actions add action tunnel_key set src_ip 1.1.1.1 dst_ip 2.2.2.2 id 4294967295 index 1",
+ "expExitCode": "0",
+ "verifyCmd": "$TC actions get action tunnel_key index 1",
+ "matchPattern": "action order [0-9]+: tunnel_key.*set.*src_ip 1.1.1.1.*dst_ip 2.2.2.2.*key_id 4294967295.*index 1",
+ "matchCount": "1",
+ "teardown": [
+ "$TC actions flush action tunnel_key"
+ ]
+ },
+ {
+ "id": "e84a",
+ "name": "Add tunnel_key set action with id value exceeding 32-bit maximum",
+ "category": [
+ "actions",
+ "tunnel_key"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action tunnel_key",
+ 0,
+ 1,
+ 255
+ ]
+ ],
+ "cmdUnderTest": "$TC actions add action tunnel_key set src_ip 1.1.1.1 dst_ip 2.2.2.2 id 42949672955 index 1",
+ "expExitCode": "255",
+ "verifyCmd": "$TC actions get action tunnel_key index 4294967295",
+ "matchPattern": "action order [0-9]+: tunnel_key.*set.*src_ip 1.1.1.1.*dst_ip 2.2.2.2.*key_id 42949672955.*index 1",
+ "matchCount": "0",
+ "teardown": [
+ [
+ "$TC actions flush action tunnel_key",
+ 0,
+ 1,
+ 255
+ ]
+ ]
+ },
+ {
+ "id": "9c19",
+ "name": "Add tunnel_key set action with dst_port value at 16-bit maximum",
+ "category": [
+ "actions",
+ "tunnel_key"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action tunnel_key",
+ 0,
+ 1,
+ 255
+ ]
+ ],
+ "cmdUnderTest": "$TC actions add action tunnel_key set src_ip 1.1.1.1 dst_ip 2.2.2.2 id 429 dst_port 65535 index 1",
+ "expExitCode": "0",
+ "verifyCmd": "$TC actions get action tunnel_key index 1",
+ "matchPattern": "action order [0-9]+: tunnel_key.*set.*src_ip 1.1.1.1.*dst_ip 2.2.2.2.*key_id 429.*dst_port 65535.*index 1",
+ "matchCount": "1",
+ "teardown": [
+ "$TC actions flush action tunnel_key"
+ ]
+ },
+ {
+ "id": "3bd9",
+ "name": "Add tunnel_key set action with dst_port value exceeding 16-bit maximum",
+ "category": [
+ "actions",
+ "tunnel_key"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action tunnel_key",
+ 0,
+ 1,
+ 255
+ ]
+ ],
+ "cmdUnderTest": "$TC actions add action tunnel_key set src_ip 1.1.1.1 dst_ip 2.2.2.2 id 429 dst_port 65535789 index 1",
+ "expExitCode": "255",
+ "verifyCmd": "$TC actions get action tunnel_key index 1",
+ "matchPattern": "action order [0-9]+: tunnel_key.*set.*src_ip 1.1.1.1.*dst_ip 2.2.2.2.*key_id 429.*dst_port 65535789.*index 1",
+ "matchCount": "0",
+ "teardown": [
+ [
+ "$TC actions flush action tunnel_key",
+ 0,
+ 1,
+ 255
+ ]
+ ]
+ },
+ {
+ "id": "68e2",
+ "name": "Add tunnel_key unset action",
+ "category": [
+ "actions",
+ "tunnel_key"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action tunnel_key",
+ 0,
+ 1,
+ 255
+ ]
+ ],
+ "cmdUnderTest": "$TC actions add action tunnel_key unset index 1",
+ "expExitCode": "0",
+ "verifyCmd": "$TC actions get action tunnel_key index 1",
+ "matchPattern": "action order [0-9]+: tunnel_key.*unset.*index 1 ref",
+ "matchCount": "1",
+ "teardown": [
+ "$TC actions flush action tunnel_key"
+ ]
+ },
+ {
+ "id": "6192",
+ "name": "Add tunnel_key unset continue action",
+ "category": [
+ "actions",
+ "tunnel_key"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action tunnel_key",
+ 0,
+ 1,
+ 255
+ ]
+ ],
+ "cmdUnderTest": "$TC actions add action tunnel_key unset continue index 1",
+ "expExitCode": "0",
+ "verifyCmd": "$TC actions get action tunnel_key index 1",
+ "matchPattern": "action order [0-9]+: tunnel_key.*unset continue.*index 1 ref",
+ "matchCount": "1",
+ "teardown": [
+ "$TC actions flush action tunnel_key"
+ ]
+ },
+ {
+ "id": "061d",
+ "name": "Add tunnel_key set continue action with cookie",
+ "category": [
+ "actions",
+ "tunnel_key"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action tunnel_key",
+ 0,
+ 1,
+ 255
+ ]
+ ],
+ "cmdUnderTest": "$TC actions add action tunnel_key set src_ip 192.168.10.1 dst_ip 192.168.20.2 id 123 continue index 1 cookie aa11bb22cc33dd44ee55ff66aa11b1b2",
+ "expExitCode": "0",
+ "verifyCmd": "$TC actions get action tunnel_key index 1",
+ "matchPattern": "action order [0-9]+: tunnel_key.*set.*src_ip 192.168.10.1.*dst_ip 192.168.20.2.*key_id 123.*csum continue.*index 1.*cookie aa11bb22cc33dd44ee55ff66aa11b1b2",
+ "matchCount": "1",
+ "teardown": [
+ "$TC actions flush action tunnel_key"
+ ]
+ },
+ {
+ "id": "8acb",
+ "name": "Add tunnel_key set continue action with invalid cookie",
+ "category": [
+ "actions",
+ "tunnel_key"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action tunnel_key",
+ 0,
+ 1,
+ 255
+ ]
+ ],
+ "cmdUnderTest": "$TC actions add action tunnel_key set src_ip 192.168.10.1 dst_ip 192.168.20.2 id 123 continue index 1 cookie aa11bb22cc33dd44ee55ff66aa11b1b2777888",
+ "expExitCode": "255",
+ "verifyCmd": "$TC actions get action tunnel_key index 1",
+ "matchPattern": "action order [0-9]+: tunnel_key.*set.*src_ip 192.168.10.1.*dst_ip 192.168.20.2.*key_id 123.*csum continue.*index 1.*cookie aa11bb22cc33dd44ee55ff66aa11b1b2777888",
+ "matchCount": "0",
+ "teardown": [
+ [
+ "$TC actions flush action tunnel_key",
+ 0,
+ 1,
+ 255
+ ]
+ ]
+ },
+ {
+ "id": "a07e",
+ "name": "Add tunnel_key action with no set/unset command specified",
+ "category": [
+ "actions",
+ "tunnel_key"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action tunnel_key",
+ 0,
+ 1,
+ 255
+ ]
+ ],
+ "cmdUnderTest": "$TC actions add action tunnel_key src_ip 10.10.10.1 dst_ip 20.20.20.2 id 1",
+ "expExitCode": "255",
+ "verifyCmd": "$TC actions get action tunnel_key index 1",
+ "matchPattern": "action order [0-9]+: tunnel_key.*src_ip 10.10.10.1.*dst_ip 20.20.20.2.*key_id 1",
+ "matchCount": "0",
+ "teardown": [
+ [
+ "$TC actions flush action tunnel_key",
+ 0,
+ 1,
+ 255
+ ]
+ ]
+ },
+ {
+ "id": "b227",
+ "name": "Add tunnel_key action with csum option",
+ "category": [
+ "actions",
+ "tunnel_key"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action tunnel_key",
+ 0,
+ 1,
+ 255
+ ]
+ ],
+ "cmdUnderTest": "$TC actions add action tunnel_key set src_ip 10.10.10.1 dst_ip 20.20.20.2 id 1 csum index 99",
+ "expExitCode": "0",
+ "verifyCmd": "$TC actions get action tunnel_key index 99",
+ "matchPattern": "action order [0-9]+: tunnel_key.*src_ip 10.10.10.1.*dst_ip 20.20.20.2.*key_id 1.*csum pipe.*index 99",
+ "matchCount": "1",
+ "teardown": [
+ "$TC actions flush action tunnel_key"
+ ]
+ },
+ {
+ "id": "58a7",
+ "name": "Add tunnel_key action with nocsum option",
+ "category": [
+ "actions",
+ "tunnel_key"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action tunnel_key",
+ 0,
+ 1,
+ 255
+ ]
+ ],
+ "cmdUnderTest": "$TC actions add action tunnel_key set src_ip 10.10.10.1 dst_ip 10.10.10.2 id 7823 nocsum index 234",
+ "expExitCode": "0",
+ "verifyCmd": "$TC actions get action tunnel_key index 234",
+ "matchPattern": "action order [0-9]+: tunnel_key.*src_ip 10.10.10.1.*dst_ip 10.10.10.2.*key_id 7823.*nocsum pipe.*index 234",
+ "matchCount": "1",
+ "teardown": [
+ "$TC actions flush action tunnel_key"
+ ]
+ },
+ {
+ "id": "2575",
+ "name": "Add tunnel_key action with not-supported parameter",
+ "category": [
+ "actions",
+ "tunnel_key"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action tunnel_key",
+ 0,
+ 1,
+ 255
+ ]
+ ],
+ "cmdUnderTest": "$TC actions add action tunnel_key set src_ip 10.10.10.1 dst_ip 10.10.10.2 id 7 foobar 999 index 4",
+ "expExitCode": "255",
+ "verifyCmd": "$TC actions get action tunnel_key index 4",
+ "matchPattern": "action order [0-9]+: tunnel_key.*src_ip 10.10.10.1.*dst_ip 10.10.10.2.*key_id 7.*foobar 999.*index 4",
+ "matchCount": "0",
+ "teardown": [
+ [
+ "$TC actions flush action tunnel_key",
+ 0,
+ 1,
+ 255
+ ]
+ ]
+ },
+ {
+ "id": "7a88",
+ "name": "Add tunnel_key action with cookie parameter",
+ "category": [
+ "actions",
+ "tunnel_key"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action tunnel_key",
+ 0,
+ 1,
+ 255
+ ]
+ ],
+ "cmdUnderTest": "$TC actions add action tunnel_key set src_ip 10.10.10.1 dst_ip 10.10.10.2 id 7 index 4 cookie aa11bb22cc33dd44ee55ff66aa11b1b2",
+ "expExitCode": "0",
+ "verifyCmd": "$TC actions get action tunnel_key index 4",
+ "matchPattern": "action order [0-9]+: tunnel_key.*set.*src_ip 10.10.10.1.*dst_ip 10.10.10.2.*key_id 7.*dst_port 0.*csum pipe.*index 4 ref.*cookie aa11bb22cc33dd44ee55ff66aa11b1b2",
+ "matchCount": "1",
+ "teardown": [
+ "$TC actions flush action tunnel_key"
+ ]
+ },
+ {
+ "id": "4f20",
+ "name": "Add tunnel_key action with a single geneve option parameter",
+ "category": [
+ "actions",
+ "tunnel_key"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action tunnel_key",
+ 0,
+ 1,
+ 255
+ ]
+ ],
+ "cmdUnderTest": "$TC actions add action tunnel_key set src_ip 1.1.1.1 dst_ip 2.2.2.2 id 42 dst_port 6081 geneve_opts 0102:80:00880022 index 1",
+ "expExitCode": "0",
+ "verifyCmd": "$TC actions get action tunnel_key index 1",
+ "matchPattern": "action order [0-9]+: tunnel_key.*set.*src_ip 1.1.1.1.*dst_ip 2.2.2.2.*key_id 42.*dst_port 6081.*geneve_opt 0102:80:00880022.*index 1",
+ "matchCount": "1",
+ "teardown": [
+ "$TC actions flush action tunnel_key"
+ ]
+ },
+ {
+ "id": "e33d",
+ "name": "Add tunnel_key action with multiple geneve options parameter",
+ "category": [
+ "actions",
+ "tunnel_key"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action tunnel_key",
+ 0,
+ 1,
+ 255
+ ]
+ ],
+ "cmdUnderTest": "$TC actions add action tunnel_key set src_ip 1.1.1.1 dst_ip 2.2.2.2 id 42 dst_port 6081 geneve_opts 0102:80:00880022,0408:42:0040007611223344,0111:02:1020304011223344 index 1",
+ "expExitCode": "0",
+ "verifyCmd": "$TC actions get action tunnel_key index 1",
+ "matchPattern": "action order [0-9]+: tunnel_key.*set.*src_ip 1.1.1.1.*dst_ip 2.2.2.2.*key_id 42.*dst_port 6081.*geneve_opt 0102:80:00880022,0408:42:0040007611223344,0111:02:1020304011223344.*index 1",
+ "matchCount": "1",
+ "teardown": [
+ "$TC actions flush action tunnel_key"
+ ]
+ },
+ {
+ "id": "0778",
+ "name": "Add tunnel_key action with invalid class geneve option parameter",
+ "category": [
+ "actions",
+ "tunnel_key"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action tunnel_key",
+ 0,
+ 1,
+ 255
+ ]
+ ],
+ "cmdUnderTest": "$TC actions add action tunnel_key set src_ip 1.1.1.1 dst_ip 2.2.2.2 id 42 dst_port 6081 geneve_opts 824212:80:00880022 index 1",
+ "expExitCode": "255",
+ "verifyCmd": "$TC actions get action tunnel_key index 1",
+ "matchPattern": "action order [0-9]+: tunnel_key.*set.*src_ip 1.1.1.1.*dst_ip 2.2.2.2.*key_id 42.*dst_port 6081.*geneve_opt 824212:80:00880022.*index 1",
+ "matchCount": "0",
+ "teardown": [
+ "$TC actions flush action tunnel_key"
+ ]
+ },
+ {
+ "id": "4ae8",
+ "name": "Add tunnel_key action with invalid type geneve option parameter",
+ "category": [
+ "actions",
+ "tunnel_key"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action tunnel_key",
+ 0,
+ 1,
+ 255
+ ]
+ ],
+ "cmdUnderTest": "$TC actions add action tunnel_key set src_ip 1.1.1.1 dst_ip 2.2.2.2 id 42 dst_port 6081 geneve_opts 0102:4224:00880022 index 1",
+ "expExitCode": "255",
+ "verifyCmd": "$TC actions get action tunnel_key index 1",
+ "matchPattern": "action order [0-9]+: tunnel_key.*set.*src_ip 1.1.1.1.*dst_ip 2.2.2.2.*key_id 42.*dst_port 6081.*geneve_opt 0102:4224:00880022.*index 1",
+ "matchCount": "0",
+ "teardown": [
+ "$TC actions flush action tunnel_key"
+ ]
+ },
+ {
+ "id": "4039",
+ "name": "Add tunnel_key action with short data length geneve option parameter",
+ "category": [
+ "actions",
+ "tunnel_key"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action tunnel_key",
+ 0,
+ 1,
+ 255
+ ]
+ ],
+ "cmdUnderTest": "$TC actions add action tunnel_key set src_ip 1.1.1.1 dst_ip 2.2.2.2 id 42 dst_port 6081 geneve_opts 0102:80:4288 index 1",
+ "expExitCode": "255",
+ "verifyCmd": "$TC actions get action tunnel_key index 1",
+ "matchPattern": "action order [0-9]+: tunnel_key.*set.*src_ip 1.1.1.1.*dst_ip 2.2.2.2.*key_id 42.*dst_port 6081.*geneve_opt 0102:80:4288.*index 1",
+ "matchCount": "0",
+ "teardown": [
+ "$TC actions flush action tunnel_key"
+ ]
+ },
+ {
+ "id": "26a6",
+ "name": "Add tunnel_key action with non-multiple of 4 data length geneve option parameter",
+ "category": [
+ "actions",
+ "tunnel_key"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action tunnel_key",
+ 0,
+ 1,
+ 255
+ ]
+ ],
+ "cmdUnderTest": "$TC actions add action tunnel_key set src_ip 1.1.1.1 dst_ip 2.2.2.2 id 42 dst_port 6081 geneve_opts 0102:80:4288428822 index 1",
+ "expExitCode": "255",
+ "verifyCmd": "$TC actions get action tunnel_key index 1",
+ "matchPattern": "action order [0-9]+: tunnel_key.*set.*src_ip 1.1.1.1.*dst_ip 2.2.2.2.*key_id 42.*dst_port 6081.*geneve_opt 0102:80:4288428822.*index 1",
+ "matchCount": "0",
+ "teardown": [
+ "$TC actions flush action tunnel_key"
+ ]
+ },
+ {
+ "id": "f44d",
+ "name": "Add tunnel_key action with incomplete geneve options parameter",
+ "category": [
+ "actions",
+ "tunnel_key"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action tunnel_key",
+ 0,
+ 1,
+ 255
+ ]
+ ],
+ "cmdUnderTest": "$TC actions add action tunnel_key set src_ip 1.1.1.1 dst_ip 2.2.2.2 id 42 dst_port 6081 geneve_opts 0102:80:00880022,0408:42: index 1",
+ "expExitCode": "255",
+ "verifyCmd": "$TC actions get action tunnel_key index 1",
+ "matchPattern": "action order [0-9]+: tunnel_key.*set.*src_ip 1.1.1.1.*dst_ip 2.2.2.2.*key_id 42.*dst_port 6081.*geneve_opt 0102:80:00880022,0408:42:.*index 1",
+ "matchCount": "0",
+ "teardown": [
+ "$TC actions flush action tunnel_key"
+ ]
+ },
+ {
+ "id": "7afc",
+ "name": "Replace tunnel_key set action with all parameters",
+ "category": [
+ "actions",
+ "tunnel_key"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action tunnel_key",
+ 0,
+ 1,
+ 255
+ ],
+ "$TC actions add action tunnel_key set src_ip 10.10.10.1 dst_ip 20.20.20.2 dst_port 3128 csum id 1 index 1"
+ ],
+ "cmdUnderTest": "$TC actions replace action tunnel_key set src_ip 11.11.11.1 dst_ip 21.21.21.2 dst_port 3129 nocsum id 11 index 1",
+ "expExitCode": "0",
+ "verifyCmd": "$TC actions get action tunnel_key index 1",
+ "matchPattern": "action order [0-9]+: tunnel_key.*set.*src_ip 11.11.11.1.*dst_ip 21.21.21.2.*key_id 11.*dst_port 3129.*nocsum pipe.*index 1",
+ "matchCount": "1",
+ "teardown": [
+ "$TC actions flush action tunnel_key"
+ ]
+ },
+ {
+ "id": "364d",
+ "name": "Replace tunnel_key set action with all parameters and cookie",
+ "category": [
+ "actions",
+ "tunnel_key"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action tunnel_key",
+ 0,
+ 1,
+ 255
+ ],
+ "$TC actions add action tunnel_key set src_ip 10.10.10.1 dst_ip 20.20.20.2 dst_port 3128 nocsum id 1 index 1 cookie aabbccddeeff112233445566778800a"
+ ],
+ "cmdUnderTest": "$TC actions replace action tunnel_key set src_ip 11.11.11.1 dst_ip 21.21.21.2 dst_port 3129 id 11 csum reclassify index 1 cookie a1b1c1d1",
+ "expExitCode": "0",
+ "verifyCmd": "$TC actions get action tunnel_key index 1",
+ "matchPattern": "action order [0-9]+: tunnel_key.*set.*src_ip 11.11.11.1.*dst_ip 21.21.21.2.*key_id 11.*dst_port 3129.*csum reclassify.*index 1.*cookie a1b1c1d1",
+ "matchCount": "1",
+ "teardown": [
+ "$TC actions flush action tunnel_key"
+ ]
+ },
+ {
+ "id": "937c",
+ "name": "Fetch all existing tunnel_key actions",
+ "category": [
+ "actions",
+ "tunnel_key"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action tunnel_key",
+ 0,
+ 1,
+ 255
+ ],
+ "$TC actions add action tunnel_key set src_ip 10.10.10.1 dst_ip 20.20.20.2 dst_port 3128 nocsum id 1 pipe index 1",
+ "$TC actions add action tunnel_key set src_ip 11.10.10.1 dst_ip 21.20.20.2 dst_port 3129 csum id 2 jump 10 index 2",
+ "$TC actions add action tunnel_key set src_ip 12.10.10.1 dst_ip 22.20.20.2 dst_port 3130 csum id 3 pass index 3",
+ "$TC actions add action tunnel_key set src_ip 13.10.10.1 dst_ip 23.20.20.2 dst_port 3131 nocsum id 4 continue index 4"
+ ],
+ "cmdUnderTest": "$TC actions list action tunnel_key",
+ "expExitCode": "0",
+ "verifyCmd": "$TC actions list action tunnel_key",
+ "matchPattern": "action order [0-9]+: tunnel_key.*set.*src_ip 10.10.10.1.*dst_ip 20.20.20.2.*key_id 1.*dst_port 3128.*nocsum pipe.*index 1.*set.*src_ip 11.10.10.1.*dst_ip 21.20.20.2.*key_id 2.*dst_port 3129.*csum jump 10.*index 2.*set.*src_ip 12.10.10.1.*dst_ip 22.20.20.2.*key_id 3.*dst_port 3130.*csum pass.*index 3.*set.*src_ip 13.10.10.1.*dst_ip 23.20.20.2.*key_id 4.*dst_port 3131.*nocsum continue.*index 4",
+ "matchCount": "1",
+ "teardown": [
+ "$TC actions flush action tunnel_key"
+ ]
+ },
+ {
+ "id": "6783",
+ "name": "Flush all existing tunnel_key actions",
+ "category": [
+ "actions",
+ "tunnel_key"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action tunnel_key",
+ 0,
+ 1,
+ 255
+ ],
+ "$TC actions add action tunnel_key set src_ip 10.10.10.1 dst_ip 20.20.20.2 dst_port 3128 nocsum id 1 pipe index 1",
+ "$TC actions add action tunnel_key set src_ip 11.10.10.1 dst_ip 21.20.20.2 dst_port 3129 csum id 2 reclassify index 2",
+ "$TC actions add action tunnel_key set src_ip 12.10.10.1 dst_ip 22.20.20.2 dst_port 3130 csum id 3 pass index 3",
+ "$TC actions add action tunnel_key set src_ip 13.10.10.1 dst_ip 23.20.20.2 dst_port 3131 nocsum id 4 continue index 4"
+ ],
+ "cmdUnderTest": "$TC actions flush action tunnel_key",
+ "expExitCode": "0",
+ "verifyCmd": "$TC actions list action tunnel_key",
+ "matchPattern": "action order [0-9]+:.*",
+ "matchCount": "0",
+ "teardown": [
+ "$TC actions flush action tunnel_key"
+ ]
+ }
+]
diff --git a/tools/testing/selftests/tc-testing/tc-tests/filters/fw.json b/tools/testing/selftests/tc-testing/tc-tests/filters/fw.json
new file mode 100644
index 000000000000..3b97cfd7e0f8
--- /dev/null
+++ b/tools/testing/selftests/tc-testing/tc-tests/filters/fw.json
@@ -0,0 +1,1049 @@
+[
+ {
+ "id": "901f",
+ "name": "Add fw filter with prio at 32-bit maxixum",
+ "category": [
+ "filter",
+ "fw"
+ ],
+ "setup": [
+ "$TC qdisc add dev $DEV1 ingress"
+ ],
+ "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 1 prio 65535 fw action ok",
+ "expExitCode": "0",
+ "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 1 prio 65535 protocol all fw",
+ "matchPattern": "pref 65535 fw.*handle 0x1.*gact action pass",
+ "matchCount": "1",
+ "teardown": [
+ "$TC qdisc del dev $DEV1 ingress"
+ ]
+ },
+ {
+ "id": "51e2",
+ "name": "Add fw filter with prio exceeding 32-bit maxixum",
+ "category": [
+ "filter",
+ "fw"
+ ],
+ "setup": [
+ "$TC qdisc add dev $DEV1 ingress"
+ ],
+ "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 1 prio 65536 fw action ok",
+ "expExitCode": "255",
+ "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 1 prio 65536 protocol all fw",
+ "matchPattern": "pref 65536 fw.*handle 0x1.*gact action pass",
+ "matchCount": "0",
+ "teardown": [
+ "$TC qdisc del dev $DEV1 ingress"
+ ]
+ },
+ {
+ "id": "d987",
+ "name": "Add fw filter with action ok",
+ "category": [
+ "filter",
+ "fw"
+ ],
+ "setup": [
+ "$TC qdisc add dev $DEV1 ingress"
+ ],
+ "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 1 prio 1 fw action ok",
+ "expExitCode": "0",
+ "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 1 prio 1 protocol all fw",
+ "matchPattern": "handle 0x1.*gact action pass",
+ "matchCount": "1",
+ "teardown": [
+ "$TC qdisc del dev $DEV1 ingress"
+ ]
+ },
+ {
+ "id": "affe",
+ "name": "Add fw filter with action continue",
+ "category": [
+ "filter",
+ "fw"
+ ],
+ "setup": [
+ "$TC qdisc add dev $DEV1 ingress"
+ ],
+ "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 1 prio 1 fw action continue",
+ "expExitCode": "0",
+ "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 1 prio 1 protocol all fw",
+ "matchPattern": "handle 0x1.*gact action continue",
+ "matchCount": "1",
+ "teardown": [
+ "$TC qdisc del dev $DEV1 ingress"
+ ]
+ },
+ {
+ "id": "28bc",
+ "name": "Add fw filter with action pipe",
+ "category": [
+ "filter",
+ "fw"
+ ],
+ "setup": [
+ "$TC qdisc add dev $DEV1 ingress"
+ ],
+ "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 1 prio 1 fw action pipe",
+ "expExitCode": "0",
+ "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 1 prio 1 protocol all fw",
+ "matchPattern": "handle 0x1.*gact action pipe",
+ "matchCount": "1",
+ "teardown": [
+ "$TC qdisc del dev $DEV1 ingress"
+ ]
+ },
+ {
+ "id": "8da2",
+ "name": "Add fw filter with action drop",
+ "category": [
+ "filter",
+ "fw"
+ ],
+ "setup": [
+ "$TC qdisc add dev $DEV1 ingress"
+ ],
+ "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 1 prio 1 fw action drop",
+ "expExitCode": "0",
+ "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 1 protocol all prio 1 fw",
+ "matchPattern": "handle 0x1.*gact action drop",
+ "matchCount": "1",
+ "teardown": [
+ "$TC qdisc del dev $DEV1 ingress"
+ ]
+ },
+ {
+ "id": "9436",
+ "name": "Add fw filter with action reclassify",
+ "category": [
+ "filter",
+ "fw"
+ ],
+ "setup": [
+ "$TC qdisc add dev $DEV1 ingress"
+ ],
+ "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 1 prio 1 fw action reclassify",
+ "expExitCode": "0",
+ "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 1 prio 1 protocol all fw",
+ "matchPattern": "handle 0x1.*gact action reclassify",
+ "matchCount": "1",
+ "teardown": [
+ "$TC qdisc del dev $DEV1 ingress"
+ ]
+ },
+ {
+ "id": "95bb",
+ "name": "Add fw filter with action jump 10",
+ "category": [
+ "filter",
+ "fw"
+ ],
+ "setup": [
+ "$TC qdisc add dev $DEV1 ingress"
+ ],
+ "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 1 prio 1 fw action jump 10",
+ "expExitCode": "0",
+ "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 1 prio 1 protocol all fw",
+ "matchPattern": "handle 0x1.*gact action jump 10",
+ "matchCount": "1",
+ "teardown": [
+ "$TC qdisc del dev $DEV1 ingress"
+ ]
+ },
+ {
+ "id": "3d74",
+ "name": "Add fw filter with action goto chain 5",
+ "category": [
+ "filter",
+ "fw"
+ ],
+ "setup": [
+ "$TC qdisc add dev $DEV1 ingress"
+ ],
+ "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 1 prio 1 fw action goto chain 5",
+ "expExitCode": "0",
+ "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 1 prio 1 protocol all fw",
+ "matchPattern": "handle 0x1.*gact action goto chain 5",
+ "matchCount": "1",
+ "teardown": [
+ "$TC qdisc del dev $DEV1 ingress"
+ ]
+ },
+ {
+ "id": "eb8f",
+ "name": "Add fw filter with invalid action",
+ "category": [
+ "filter",
+ "fw"
+ ],
+ "setup": [
+ "$TC qdisc add dev $DEV1 ingress"
+ ],
+ "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 1 prio 1 fw action pump",
+ "expExitCode": "255",
+ "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 1 prio 1 protocol all fw",
+ "matchPattern": "handle 0x1.*gact action pump",
+ "matchCount": "0",
+ "teardown": [
+ "$TC qdisc del dev $DEV1 ingress"
+ ]
+ },
+ {
+ "id": "6a79",
+ "name": "Add fw filter with missing mandatory action",
+ "category": [
+ "filter",
+ "fw"
+ ],
+ "setup": [
+ "$TC qdisc add dev $DEV1 ingress"
+ ],
+ "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 1 prio 1 fw",
+ "expExitCode": "2",
+ "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 1 prio 1 protocol all fw",
+ "matchPattern": "filter protocol all pref [0-9]+ fw.*handle 0x1",
+ "matchCount": "0",
+ "teardown": [
+ "$TC qdisc del dev $DEV1 ingress"
+ ]
+ },
+ {
+ "id": "8298",
+ "name": "Add fw filter with cookie",
+ "category": [
+ "filter",
+ "fw"
+ ],
+ "setup": [
+ "$TC qdisc add dev $DEV1 ingress"
+ ],
+ "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 1 prio 2 fw action pipe cookie aa11bb22cc33dd44ee55ff66aa11b1b2",
+ "expExitCode": "0",
+ "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 1 prio 2 protocol all fw",
+ "matchPattern": "pref 2 fw.*handle 0x1.*gact action pipe.*cookie aa11bb22cc33dd44ee55ff66aa11b1b2",
+ "matchCount": "1",
+ "teardown": [
+ "$TC qdisc del dev $DEV1 ingress"
+ ]
+ },
+ {
+ "id": "a88c",
+ "name": "Add fw filter with invalid cookie",
+ "category": [
+ "filter",
+ "fw"
+ ],
+ "setup": [
+ "$TC qdisc add dev $DEV1 ingress"
+ ],
+ "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 1 prio 2 fw action continue cookie aa11bb22cc33dd44ee55ff66aa11b1b2777888",
+ "expExitCode": "255",
+ "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 1 prio 2 protocol all fw",
+ "matchPattern": "pref 2 fw.*handle 0x1.*gact action continue.*cookie aa11bb22cc33dd44ee55ff66aa11b1b2777888",
+ "matchCount": "0",
+ "teardown": [
+ "$TC qdisc del dev $DEV1 ingress"
+ ]
+ },
+ {
+ "id": "10f6",
+ "name": "Add fw filter with handle in hex",
+ "category": [
+ "filter",
+ "fw"
+ ],
+ "setup": [
+ "$TC qdisc add dev $DEV1 ingress"
+ ],
+ "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 0xa1b2ff prio 1 fw action ok",
+ "expExitCode": "0",
+ "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 0xa1b2ff prio 1 protocol all fw",
+ "matchPattern": "fw.*handle 0xa1b2ff.*gact action pass",
+ "matchCount": "1",
+ "teardown": [
+ "$TC qdisc del dev $DEV1 ingress"
+ ]
+ },
+ {
+ "id": "9d51",
+ "name": "Add fw filter with handle at 32-bit maximum",
+ "category": [
+ "filter",
+ "fw"
+ ],
+ "setup": [
+ "$TC qdisc add dev $DEV1 ingress"
+ ],
+ "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 4294967295 prio 1 fw action ok",
+ "expExitCode": "0",
+ "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 4294967295 prio 1 protocol all fw",
+ "matchPattern": "fw.*handle 0xffffffff.*gact action pass",
+ "matchCount": "1",
+ "teardown": [
+ "$TC qdisc del dev $DEV1 ingress"
+ ]
+ },
+ {
+ "id": "d939",
+ "name": "Add fw filter with handle exceeding 32-bit maximum",
+ "category": [
+ "filter",
+ "fw"
+ ],
+ "setup": [
+ "$TC qdisc add dev $DEV1 ingress"
+ ],
+ "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 4294967296 prio 1 fw action ok",
+ "expExitCode": "1",
+ "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 4294967296 prio 1 protocol all fw",
+ "matchPattern": "fw.*handle 0x.*gact action pass",
+ "matchCount": "0",
+ "teardown": [
+ "$TC qdisc del dev $DEV1 ingress"
+ ]
+ },
+ {
+ "id": "658c",
+ "name": "Add fw filter with mask in hex",
+ "category": [
+ "filter",
+ "fw"
+ ],
+ "setup": [
+ "$TC qdisc add dev $DEV1 ingress"
+ ],
+ "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 10/0xa1b2f prio 1 fw action ok",
+ "expExitCode": "0",
+ "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 10 prio 1 protocol all fw",
+ "matchPattern": "fw.*handle 0xa/0xa1b2f",
+ "matchCount": "1",
+ "teardown": [
+ "$TC qdisc del dev $DEV1 ingress"
+ ]
+ },
+ {
+ "id": "86be",
+ "name": "Add fw filter with mask at 32-bit maximum",
+ "category": [
+ "filter",
+ "fw"
+ ],
+ "setup": [
+ "$TC qdisc add dev $DEV1 ingress"
+ ],
+ "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 10/4294967295 prio 1 fw action ok",
+ "expExitCode": "0",
+ "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 10 prio 1 protocol all fw",
+ "matchPattern": "fw.*handle 0xa[^/]",
+ "matchCount": "1",
+ "teardown": [
+ "$TC qdisc del dev $DEV1 ingress"
+ ]
+ },
+ {
+ "id": "e635",
+ "name": "Add fw filter with mask exceeding 32-bit maximum",
+ "category": [
+ "filter",
+ "fw"
+ ],
+ "setup": [
+ "$TC qdisc add dev $DEV1 ingress"
+ ],
+ "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 10/4294967296 prio 1 fw action ok",
+ "expExitCode": "1",
+ "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 10 prio 1 protocol all fw",
+ "matchPattern": "fw.*handle 0xa",
+ "matchCount": "0",
+ "teardown": [
+ "$TC qdisc del dev $DEV1 ingress"
+ ]
+ },
+ {
+ "id": "6cab",
+ "name": "Add fw filter with handle/mask in hex",
+ "category": [
+ "filter",
+ "fw"
+ ],
+ "setup": [
+ "$TC qdisc add dev $DEV1 ingress"
+ ],
+ "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 0xa1b2cdff/0x1a2bffdc prio 1 fw action ok",
+ "expExitCode": "0",
+ "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 0xa1b2cdff prio 1 protocol all fw",
+ "matchPattern": "fw.*handle 0xa1b2cdff/0x1a2bffdc",
+ "matchCount": "1",
+ "teardown": [
+ "$TC qdisc del dev $DEV1 ingress"
+ ]
+ },
+ {
+ "id": "8700",
+ "name": "Add fw filter with handle/mask at 32-bit maximum",
+ "category": [
+ "filter",
+ "fw"
+ ],
+ "setup": [
+ "$TC qdisc add dev $DEV1 ingress"
+ ],
+ "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 4294967295/4294967295 prio 1 fw action ok",
+ "expExitCode": "0",
+ "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 0xffffffff prio 1 protocol all fw",
+ "matchPattern": "fw.*handle 0xffffffff[^/]",
+ "matchCount": "1",
+ "teardown": [
+ "$TC qdisc del dev $DEV1 ingress"
+ ]
+ },
+ {
+ "id": "7d62",
+ "name": "Add fw filter with handle/mask exceeding 32-bit maximum",
+ "category": [
+ "filter",
+ "fw"
+ ],
+ "setup": [
+ "$TC qdisc add dev $DEV1 ingress"
+ ],
+ "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 4294967296/4294967296 prio 1 fw action ok",
+ "expExitCode": "1",
+ "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 10 prio 1 protocol all fw",
+ "matchPattern": "fw.*handle",
+ "matchCount": "0",
+ "teardown": [
+ "$TC qdisc del dev $DEV1 ingress"
+ ]
+ },
+ {
+ "id": "7b69",
+ "name": "Add fw filter with missing mandatory handle",
+ "category": [
+ "filter",
+ "fw"
+ ],
+ "setup": [
+ "$TC qdisc add dev $DEV1 ingress"
+ ],
+ "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: prio 1 fw action ok",
+ "expExitCode": "2",
+ "verifyCmd": "$TC filter show dev $DEV1 parent ffff:",
+ "matchPattern": "filter protocol all.*fw.*handle.*gact action pass",
+ "matchCount": "0",
+ "teardown": [
+ "$TC qdisc del dev $DEV1 ingress"
+ ]
+ },
+ {
+ "id": "d68b",
+ "name": "Add fw filter with invalid parent",
+ "category": [
+ "filter",
+ "fw"
+ ],
+ "setup": [
+ "$TC qdisc add dev $DEV1 ingress"
+ ],
+ "cmdUnderTest": "$TC filter add dev $DEV1 parent aa11b1b2: handle 1 prio 1 fw action ok",
+ "expExitCode": "255",
+ "verifyCmd": "$TC filter dev $DEV1 parent aa11b1b2: handle 1 prio 1 protocol all fw",
+ "matchPattern": "filter protocol all pref 1 fw.*handle 0x1.*gact action pass",
+ "matchCount": "0",
+ "teardown": [
+ "$TC qdisc del dev $DEV1 ingress"
+ ]
+ },
+ {
+ "id": "66e0",
+ "name": "Add fw filter with missing mandatory parent id",
+ "category": [
+ "filter",
+ "fw"
+ ],
+ "setup": [
+ "$TC qdisc add dev $DEV1 ingress"
+ ],
+ "cmdUnderTest": "$TC filter add dev $DEV1 handle 1 prio 1 fw action ok",
+ "expExitCode": "2",
+ "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 1 prio 1 protocol all fw",
+ "matchPattern": "pref [0-9]+ fw.*handle 0x1.*gact action pass",
+ "matchCount": "0",
+ "teardown": [
+ "$TC qdisc del dev $DEV1 ingress"
+ ]
+ },
+ {
+ "id": "0ff3",
+ "name": "Add fw filter with classid",
+ "category": [
+ "filter",
+ "fw"
+ ],
+ "setup": [
+ "$TC qdisc add dev $DEV1 ingress"
+ ],
+ "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 1 prio 1 fw classid 3 action ok",
+ "expExitCode": "0",
+ "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 1 prio 1 protocol all fw",
+ "matchPattern": "fw.*handle 0x1 classid :3.*gact action pass",
+ "matchCount": "1",
+ "teardown": [
+ "$TC qdisc del dev $DEV1 ingress"
+ ]
+ },
+ {
+ "id": "9849",
+ "name": "Add fw filter with classid at root",
+ "category": [
+ "filter",
+ "fw"
+ ],
+ "setup": [
+ "$TC qdisc add dev $DEV1 ingress"
+ ],
+ "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 1 prio 1 fw classid ffff:ffff action ok",
+ "expExitCode": "0",
+ "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 1 prio 1 protocol all fw",
+ "matchPattern": "pref 1 fw.*handle 0x1 classid root.*gact action pass",
+ "matchCount": "1",
+ "teardown": [
+ "$TC qdisc del dev $DEV1 ingress"
+ ]
+ },
+ {
+ "id": "b7ff",
+ "name": "Add fw filter with classid - keeps last 8 (hex) digits",
+ "category": [
+ "filter",
+ "fw"
+ ],
+ "setup": [
+ "$TC qdisc add dev $DEV1 ingress"
+ ],
+ "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 1 prio 1 fw classid 98765fedcb action ok",
+ "expExitCode": "0",
+ "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 1 prio 1 protocol all fw",
+ "matchPattern": "fw.*handle 0x1 classid 765f:edcb.*gact action pass",
+ "matchCount": "1",
+ "teardown": [
+ "$TC qdisc del dev $DEV1 ingress"
+ ]
+ },
+ {
+ "id": "2b18",
+ "name": "Add fw filter with invalid classid",
+ "category": [
+ "filter",
+ "fw"
+ ],
+ "setup": [
+ "$TC qdisc add dev $DEV1 ingress"
+ ],
+ "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 1 prio 1 fw classid 6789defg action ok",
+ "expExitCode": "1",
+ "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 1 prio 1 protocol all fw",
+ "matchPattern": "fw.*handle 0x1 classid 6789:defg.*gact action pass",
+ "matchCount": "0",
+ "teardown": [
+ "$TC qdisc del dev $DEV1 ingress"
+ ]
+ },
+ {
+ "id": "fade",
+ "name": "Add fw filter with flowid",
+ "category": [
+ "filter",
+ "fw"
+ ],
+ "setup": [
+ "$TC qdisc add dev $DEV1 ingress"
+ ],
+ "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 10 prio 1 fw flowid 1:10 action ok",
+ "expExitCode": "0",
+ "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 10 prio 1 protocol all fw",
+ "matchPattern": "filter parent ffff: protocol all pref 1 fw.*handle 0xa classid 1:10.*gact action pass",
+ "matchCount": "1",
+ "teardown": [
+ "$TC qdisc del dev $DEV1 ingress"
+ ]
+ },
+ {
+ "id": "33af",
+ "name": "Add fw filter with flowid then classid (same arg, takes second)",
+ "category": [
+ "filter",
+ "fw"
+ ],
+ "setup": [
+ "$TC qdisc add dev $DEV1 ingress"
+ ],
+ "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 11 prio 1 fw flowid 10 classid 4 action ok",
+ "expExitCode": "0",
+ "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 11 prio 1 protocol all fw",
+ "matchPattern": "filter parent ffff: protocol all pref 1 fw.*handle 0xb classid :4.*gact action pass",
+ "matchCount": "1",
+ "teardown": [
+ "$TC qdisc del dev $DEV1 ingress"
+ ]
+ },
+ {
+ "id": "8a8c",
+ "name": "Add fw filter with classid then flowid (same arg, takes second)",
+ "category": [
+ "filter",
+ "fw"
+ ],
+ "setup": [
+ "$TC qdisc add dev $DEV1 ingress"
+ ],
+ "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 11 prio 1 fw classid 4 flowid 10 action ok",
+ "expExitCode": "0",
+ "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 11 prio 1 protocol all fw",
+ "matchPattern": "filter parent ffff: protocol all pref 1 fw.*handle 0xb classid :10.*gact action pass",
+ "matchCount": "1",
+ "teardown": [
+ "$TC qdisc del dev $DEV1 ingress"
+ ]
+ },
+ {
+ "id": "b50d",
+ "name": "Add fw filter with handle val/mask and flowid 10:1000",
+ "category": [
+ "filter",
+ "fw"
+ ],
+ "setup": [
+ "$TC qdisc add dev $DEV1 ingress"
+ ],
+ "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: prio 3 handle 10/0xff fw flowid 10:1000 action ok",
+ "expExitCode": "0",
+ "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 10 prio 3 protocol all fw",
+ "matchPattern": "filter parent ffff: protocol all pref 3 fw.*handle 0xa/0xff classid 10:1000.*gact action pass",
+ "matchCount": "1",
+ "teardown": [
+ "$TC qdisc del dev $DEV1 ingress"
+ ]
+ },
+ {
+ "id": "7207",
+ "name": "Add fw filter with protocol ip",
+ "category": [
+ "filter",
+ "fw"
+ ],
+ "setup": [
+ "$TC qdisc add dev $DEV1 ingress"
+ ],
+ "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: protocol ip prio 1 handle 3 fw action ok",
+ "expExitCode": "0",
+ "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 3 prio 1 protocol ip fw",
+ "matchPattern": "filter parent ffff: protocol ip pref 1 fw.*handle 0x3.*gact action pass.*index [0-9]+ ref [0-9]+ bind [0-9]+",
+ "matchCount": "1",
+ "teardown": [
+ "$TC qdisc del dev $DEV1 ingress"
+ ]
+ },
+ {
+ "id": "306d",
+ "name": "Add fw filter with protocol ipv6",
+ "category": [
+ "filter",
+ "fw"
+ ],
+ "setup": [
+ "$TC qdisc add dev $DEV1 ingress"
+ ],
+ "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: protocol ipv6 prio 2 handle 4 fw action ok",
+ "expExitCode": "0",
+ "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 4 prio 2 protocol ipv6 fw",
+ "matchPattern": "filter parent ffff: protocol ipv6 pref 2 fw.*handle 0x4.*gact action pass.*index [0-9]+ ref [0-9]+ bind [0-9]+",
+ "matchCount": "1",
+ "teardown": [
+ "$TC qdisc del dev $DEV1 ingress"
+ ]
+ },
+ {
+ "id": "9a78",
+ "name": "Add fw filter with protocol arp",
+ "category": [
+ "filter",
+ "fw"
+ ],
+ "setup": [
+ "$TC qdisc add dev $DEV1 ingress"
+ ],
+ "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: protocol arp prio 5 handle 7 fw action drop",
+ "expExitCode": "0",
+ "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 7 prio 5 protocol arp fw",
+ "matchPattern": "filter parent ffff: protocol arp pref 5 fw.*handle 0x7.*gact action drop.*index [0-9]+ ref [0-9]+ bind [0-9]+",
+ "matchCount": "1",
+ "teardown": [
+ "$TC qdisc del dev $DEV1 ingress"
+ ]
+ },
+ {
+ "id": "1821",
+ "name": "Add fw filter with protocol 802_3",
+ "category": [
+ "filter",
+ "fw"
+ ],
+ "setup": [
+ "$TC qdisc add dev $DEV1 ingress"
+ ],
+ "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: protocol 802_3 handle 1 prio 1 fw action ok",
+ "expExitCode": "0",
+ "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 1 prio 1 protocol 802_3 fw",
+ "matchPattern": "filter parent ffff: protocol 802_3 pref 1 fw.*handle 0x1.*gact action pass",
+ "matchCount": "1",
+ "teardown": [
+ "$TC qdisc del dev $DEV1 ingress"
+ ]
+ },
+ {
+ "id": "2260",
+ "name": "Add fw filter with invalid protocol",
+ "category": [
+ "filter",
+ "fw"
+ ],
+ "setup": [
+ "$TC qdisc add dev $DEV1 ingress"
+ ],
+ "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: protocol igmp handle 1 prio 1 fw action ok",
+ "expExitCode": "255",
+ "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 1 prio 1 protocol igmp fw",
+ "matchPattern": "filter parent ffff: protocol igmp pref 1 fw.*handle 0x1.*gact action pass",
+ "matchCount": "0",
+ "teardown": [
+ "$TC qdisc del dev $DEV1 ingress"
+ ]
+ },
+ {
+ "id": "09d7",
+ "name": "Add fw filters protocol 802_3 and ip with conflicting priorities",
+ "category": [
+ "filter",
+ "fw"
+ ],
+ "setup": [
+ "$TC qdisc add dev $DEV1 ingress",
+ "$TC filter add dev $DEV1 parent ffff: protocol 802_3 prio 3 handle 7 fw action ok"
+ ],
+ "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: protocol ip prio 3 handle 8 fw action ok",
+ "expExitCode": "2",
+ "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 8 prio 3 protocol ip fw",
+ "matchPattern": "filter parent ffff: protocol ip pref 3 fw.*handle 0x8",
+ "matchCount": "0",
+ "teardown": [
+ "$TC qdisc del dev $DEV1 ingress"
+ ]
+ },
+ {
+ "id": "6973",
+ "name": "Add fw filters with same index, same action",
+ "category": [
+ "filter",
+ "fw"
+ ],
+ "setup": [
+ "$TC qdisc add dev $DEV1 ingress",
+ "$TC filter add dev $DEV1 parent ffff: prio 6 handle 2 fw action continue index 5"
+ ],
+ "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: prio 8 handle 4 fw action continue index 5",
+ "expExitCode": "0",
+ "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 4 prio 8 protocol all fw",
+ "matchPattern": "filter parent ffff: protocol all pref 8 fw.*handle 0x4.*gact action continue.*index 5 ref 2 bind 2",
+ "matchCount": "1",
+ "teardown": [
+ "$TC qdisc del dev $DEV1 ingress"
+ ]
+ },
+ {
+ "id": "fc06",
+ "name": "Add fw filters with action police",
+ "category": [
+ "filter",
+ "fw"
+ ],
+ "setup": [
+ "$TC qdisc add dev $DEV1 ingress"
+ ],
+ "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: prio 3 handle 4 fw action police rate 1kbit burst 10k index 5",
+ "expExitCode": "0",
+ "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 4 prio 3 protocol all fw",
+ "matchPattern": "filter parent ffff: protocol all pref 3 fw.*handle 0x4.*police 0x5 rate 1Kbit burst 10Kb mtu 2Kb action reclassify overhead 0b.*ref 1 bind 1",
+ "matchCount": "1",
+ "teardown": [
+ "$TC qdisc del dev $DEV1 ingress"
+ ]
+ },
+ {
+ "id": "aac7",
+ "name": "Add fw filters with action police linklayer atm",
+ "category": [
+ "filter",
+ "fw"
+ ],
+ "setup": [
+ "$TC qdisc add dev $DEV1 ingress"
+ ],
+ "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: prio 3 handle 4 fw action police rate 2mbit burst 200k linklayer atm index 8",
+ "expExitCode": "0",
+ "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 4 prio 3 protocol all fw",
+ "matchPattern": "filter parent ffff: protocol all pref 3 fw.*handle 0x4.*police 0x8 rate 2Mbit burst 200Kb mtu 2Kb action reclassify overhead 0b linklayer atm.*ref 1 bind 1",
+ "matchCount": "1",
+ "teardown": [
+ "$TC qdisc del dev $DEV1 ingress"
+ ]
+ },
+ {
+ "id": "5339",
+ "name": "Del entire fw filter",
+ "category": [
+ "filter",
+ "fw"
+ ],
+ "setup": [
+ "$TC qdisc add dev $DEV1 ingress",
+ "$TC filter add dev $DEV1 parent ffff: handle 5 prio 7 fw action pass",
+ "$TC filter add dev $DEV1 parent ffff: handle 3 prio 9 fw action pass"
+ ],
+ "cmdUnderTest": "$TC filter del dev $DEV1 parent ffff:",
+ "expExitCode": "0",
+ "verifyCmd": "$TC filter show dev $DEV1 parent ffff:",
+ "matchPattern": "protocol all pref.*handle.*gact action pass",
+ "matchCount": "0",
+ "teardown": [
+ "$TC qdisc del dev $DEV1 ingress"
+ ]
+ },
+ {
+ "id": "0e99",
+ "name": "Del single fw filter x1",
+ "__comment__": "First of two tests to check that one filter is there and the other isn't",
+ "category": [
+ "filter",
+ "fw"
+ ],
+ "setup": [
+ "$TC qdisc add dev $DEV1 ingress",
+ "$TC filter add dev $DEV1 parent ffff: handle 5 prio 7 fw action pass",
+ "$TC filter add dev $DEV1 parent ffff: handle 3 prio 9 fw action pass"
+ ],
+ "cmdUnderTest": "$TC filter del dev $DEV1 parent ffff: handle 3 prio 9 fw action pass",
+ "expExitCode": "0",
+ "verifyCmd": "$TC filter show dev $DEV1 parent ffff:",
+ "matchPattern": "protocol all pref 7.*handle 0x5.*gact action pass",
+ "matchCount": "1",
+ "teardown": [
+ "$TC qdisc del dev $DEV1 ingress"
+ ]
+ },
+ {
+ "id": "f54c",
+ "name": "Del single fw filter x2",
+ "__comment__": "Second of two tests to check that one filter is there and the other isn't",
+ "category": [
+ "filter",
+ "fw"
+ ],
+ "setup": [
+ "$TC qdisc add dev $DEV1 ingress",
+ "$TC filter add dev $DEV1 parent ffff: handle 5 prio 7 fw action pass",
+ "$TC filter add dev $DEV1 parent ffff: handle 3 prio 9 fw action pass"
+ ],
+ "cmdUnderTest": "$TC filter del dev $DEV1 parent ffff: handle 3 prio 9 fw action pass",
+ "expExitCode": "0",
+ "verifyCmd": "$TC filter show dev $DEV1 parent ffff:",
+ "matchPattern": "protocol all pref 9.*handle 0x3.*gact action pass",
+ "matchCount": "0",
+ "teardown": [
+ "$TC qdisc del dev $DEV1 ingress"
+ ]
+ },
+ {
+ "id": "ba94",
+ "name": "Del fw filter by prio",
+ "category": [
+ "filter",
+ "fw"
+ ],
+ "setup": [
+ "$TC qdisc add dev $DEV1 ingress",
+ "$TC filter add dev $DEV1 parent ffff: handle 1 prio 4 fw action ok",
+ "$TC filter add dev $DEV1 parent ffff: handle 2 prio 4 fw action ok"
+ ],
+ "cmdUnderTest": "$TC filter del dev $DEV1 parent ffff: prio 4",
+ "expExitCode": "0",
+ "verifyCmd": "$TC filter show dev $DEV1 parent ffff:",
+ "matchPattern": "pref 4 fw.*gact action pass",
+ "matchCount": "0",
+ "teardown": [
+ "$TC qdisc del dev $DEV1 ingress"
+ ]
+ },
+ {
+ "id": "4acb",
+ "name": "Del fw filter by chain",
+ "category": [
+ "filter",
+ "fw"
+ ],
+ "setup": [
+ "$TC qdisc add dev $DEV1 ingress",
+ "$TC filter add dev $DEV1 parent ffff: handle 4 prio 2 chain 13 fw action pipe",
+ "$TC filter add dev $DEV1 parent ffff: handle 3 prio 5 chain 13 fw action pipe"
+ ],
+ "cmdUnderTest": "$TC filter del dev $DEV1 parent ffff: chain 13",
+ "expExitCode": "0",
+ "verifyCmd": "$TC filter show dev $DEV1 parent ffff:",
+ "matchPattern": "fw chain 13 handle.*gact action pipe",
+ "matchCount": "0",
+ "teardown": [
+ "$TC qdisc del dev $DEV1 ingress"
+ ]
+ },
+ {
+ "id": "3424",
+ "name": "Del fw filter by action (invalid)",
+ "category": [
+ "filter",
+ "fw"
+ ],
+ "setup": [
+ "$TC qdisc add dev $DEV1 ingress",
+ "$TC filter add dev $DEV1 parent ffff: handle 2 prio 4 fw action drop"
+ ],
+ "cmdUnderTest": "$TC filter del dev $DEV1 parent ffff: fw action drop",
+ "expExitCode": "2",
+ "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 2 prio 4 protocol all fw",
+ "matchPattern": "handle 0x2.*gact action drop",
+ "matchCount": "1",
+ "teardown": [
+ "$TC qdisc del dev $DEV1 ingress"
+ ]
+ },
+ {
+ "id": "da89",
+ "name": "Del fw filter by handle (invalid)",
+ "category": [
+ "filter",
+ "fw"
+ ],
+ "setup": [
+ "$TC qdisc add dev $DEV1 ingress",
+ "$TC filter add dev $DEV1 parent ffff: handle 3 prio 4 fw action continue"
+ ],
+ "cmdUnderTest": "$TC filter del dev $DEV1 parent ffff: handle 3 fw",
+ "expExitCode": "2",
+ "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 3 prio 4 protocol all fw",
+ "matchPattern": "handle 0x3.*gact action continue",
+ "matchCount": "1",
+ "teardown": [
+ "$TC qdisc del dev $DEV1 ingress"
+ ]
+ },
+ {
+ "id": "4d95",
+ "name": "Del fw filter by protocol (invalid)",
+ "category": [
+ "filter",
+ "fw"
+ ],
+ "setup": [
+ "$TC qdisc add dev $DEV1 ingress",
+ "$TC filter add dev $DEV1 parent ffff: handle 4 prio 2 protocol arp fw action pipe"
+ ],
+ "cmdUnderTest": "$TC filter del dev $DEV1 parent ffff: protocol arp fw",
+ "expExitCode": "2",
+ "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 4 prio 2 protocol arp fw",
+ "matchPattern": "filter parent ffff: protocol arp.*handle 0x4.*gact action pipe",
+ "matchCount": "1",
+ "teardown": [
+ "$TC qdisc del dev $DEV1 ingress"
+ ]
+ },
+ {
+ "id": "4736",
+ "name": "Del fw filter by flowid (invalid)",
+ "category": [
+ "filter",
+ "fw"
+ ],
+ "setup": [
+ "$TC qdisc add dev $DEV1 ingress",
+ "$TC filter add dev $DEV1 parent ffff: handle 4 prio 2 fw action pipe flowid 45"
+ ],
+ "cmdUnderTest": "$TC filter del dev $DEV1 parent ffff: fw flowid 45",
+ "expExitCode": "2",
+ "verifyCmd": "$TC filter show dev $DEV1 parent ffff:",
+ "matchPattern": "handle 0x4.*gact action pipe",
+ "matchCount": "1",
+ "teardown": [
+ "$TC qdisc del dev $DEV1 ingress"
+ ]
+ },
+ {
+ "id": "3dcb",
+ "name": "Replace fw filter action",
+ "category": [
+ "filter",
+ "fw"
+ ],
+ "setup": [
+ "$TC qdisc add dev $DEV1 ingress",
+ "$TC filter add dev $DEV1 parent ffff: handle 1 prio 2 fw action ok"
+ ],
+ "cmdUnderTest": "$TC filter replace dev $DEV1 parent ffff: handle 1 prio 2 fw action pipe",
+ "expExitCode": "0",
+ "verifyCmd": "$TC filter show dev $DEV1 parent ffff:",
+ "matchPattern": "pref 2 fw.*handle 0x1.*gact action pipe",
+ "matchCount": "1",
+ "teardown": [
+ "$TC qdisc del dev $DEV1 ingress"
+ ]
+ },
+ {
+ "id": "eb4d",
+ "name": "Replace fw filter classid",
+ "category": [
+ "filter",
+ "fw"
+ ],
+ "setup": [
+ "$TC qdisc add dev $DEV1 ingress",
+ "$TC filter add dev $DEV1 parent ffff: handle 1 prio 2 fw action ok"
+ ],
+ "cmdUnderTest": "$TC filter replace dev $DEV1 parent ffff: handle 1 prio 2 fw action pipe classid 2",
+ "expExitCode": "0",
+ "verifyCmd": "$TC filter show dev $DEV1 parent ffff:",
+ "matchPattern": "pref 2 fw.*handle 0x1 classid :2.*gact action pipe",
+ "matchCount": "1",
+ "teardown": [
+ "$TC qdisc del dev $DEV1 ingress"
+ ]
+ },
+ {
+ "id": "67ec",
+ "name": "Replace fw filter index",
+ "category": [
+ "filter",
+ "fw"
+ ],
+ "setup": [
+ "$TC qdisc add dev $DEV1 ingress",
+ "$TC filter add dev $DEV1 parent ffff: handle 1 prio 2 fw action ok index 3"
+ ],
+ "cmdUnderTest": "$TC filter replace dev $DEV1 parent ffff: handle 1 prio 2 fw action ok index 16",
+ "expExitCode": "0",
+ "verifyCmd": "$TC filter show dev $DEV1 parent ffff:",
+ "matchPattern": "pref 2 fw.*handle 0x1.*gact action pass.*index 16",
+ "matchCount": "1",
+ "teardown": [
+ "$TC qdisc del dev $DEV1 ingress"
+ ]
+ }
+]