@@ -10,28 +10,97 @@ struct meta {
 	int pkt_len;
 };
 
+struct test_ctx_s {
+	bool passed;
+	int pkt_size;
+};
+
+struct test_ctx_s test_ctx;
+
 static void on_sample(void *ctx, int cpu, void *data, __u32 size)
 {
 	struct meta *meta = (struct meta *)data;
 	struct ipv4_packet *trace_pkt_v4 = data + sizeof(*meta);
+	unsigned char *raw_pkt = data + sizeof(*meta);
+	struct test_ctx_s *tst_ctx = ctx;
 
 	ASSERT_GE(size, sizeof(pkt_v4) + sizeof(*meta), "check_size");
 	ASSERT_EQ(meta->ifindex, if_nametoindex("lo"), "check_meta_ifindex");
-	ASSERT_EQ(meta->pkt_len, sizeof(pkt_v4), "check_meta_pkt_len");
+	ASSERT_EQ(meta->pkt_len, tst_ctx->pkt_size, "check_meta_pkt_len");
 	ASSERT_EQ(memcmp(trace_pkt_v4, &pkt_v4, sizeof(pkt_v4)), 0,
 		  "check_packet_content");
 
-	*(bool *)ctx = true;
+	if (meta->pkt_len > sizeof(pkt_v4)) {
+		for (int i = 0; i < meta->pkt_len - sizeof(pkt_v4); i++)
+			ASSERT_EQ(raw_pkt[i + sizeof(pkt_v4)], (unsigned char)i,
+				  "check_packet_content");
+	}
+
+	tst_ctx->passed = true;
 }
 
-void test_xdp_bpf2bpf(void)
+#define BUF_SZ 9000
+
+static void run_xdp_bpf2bpf_pkt_size(int pkt_fd, struct perf_buffer *pb,
+				     struct test_xdp_bpf2bpf *ftrace_skel,
+				     int pkt_size)
 {
 	__u32 duration = 0, retval, size;
-	char buf[128];
+	__u8 *buf, *buf_in;
+	int err;
+
+	if (!ASSERT_LE(pkt_size, BUF_SZ, "pkt_size") ||
+	    !ASSERT_GE(pkt_size, sizeof(pkt_v4), "pkt_size"))
+		return;
+
+	buf_in = malloc(BUF_SZ);
+	if (!ASSERT_OK_PTR(buf_in, "buf_in malloc()"))
+		return;
+
+	buf = malloc(BUF_SZ);
+	if (!ASSERT_OK_PTR(buf, "buf malloc()")) {
+		free(buf_in);
+		return;
+	}
+
+	test_ctx.passed = false;
+	test_ctx.pkt_size = pkt_size;
+
+	memcpy(buf_in, &pkt_v4, sizeof(pkt_v4));
+	if (pkt_size > sizeof(pkt_v4)) {
+		for (int i = 0; i < (pkt_size - sizeof(pkt_v4)); i++)
+			buf_in[i + sizeof(pkt_v4)] = i;
+	}
+
+	/* Run test program */
+	err = bpf_prog_test_run(pkt_fd, 1, buf_in, pkt_size,
+				buf, &size, &retval, &duration);
+
+	ASSERT_OK(err, "ipv4");
+	ASSERT_EQ(retval, XDP_PASS, "ipv4 retval");
+	ASSERT_EQ(size, pkt_size, "ipv4 size");
+
+	/* Make sure bpf_xdp_output() was triggered and it sent the expected
+	 * data to the perf ring buffer.
+	 */
+	err = perf_buffer__poll(pb, 100);
+
+	ASSERT_GE(err, 0, "perf_buffer__poll");
+	ASSERT_TRUE(test_ctx.passed, "test passed");
+	/* Verify test results */
+	ASSERT_EQ(ftrace_skel->bss->test_result_fentry, if_nametoindex("lo"),
+		  "fentry result");
+	ASSERT_EQ(ftrace_skel->bss->test_result_fexit, XDP_PASS, "fexit result");
+
+	free(buf);
+	free(buf_in);
+}
+
+void test_xdp_bpf2bpf(void)
+{
 	int err, pkt_fd, map_fd;
-	bool passed = false;
-	struct iphdr iph;
-	struct iptnl_info value4 = {.family = AF_INET};
+	int pkt_sizes[] = {sizeof(pkt_v4), 1024, 4100, 8200};
+	struct iptnl_info value4 = {.family = AF_INET6};
 	struct test_xdp *pkt_skel = NULL;
 	struct test_xdp_bpf2bpf *ftrace_skel = NULL;
 	struct vip key4 = {.protocol = 6, .family = AF_INET};
@@ -73,32 +142,14 @@ void test_xdp_bpf2bpf(void)
 		goto out;
 
 	/* Set up perf buffer */
-	pb = perf_buffer__new(bpf_map__fd(ftrace_skel->maps.perf_buf_map), 1,
-			      on_sample, NULL, &passed, NULL);
+	pb = perf_buffer__new(bpf_map__fd(ftrace_skel->maps.perf_buf_map), 8,
+			      on_sample, NULL, &test_ctx, NULL);
 	if (!ASSERT_OK_PTR(pb, "perf_buf__new"))
 		goto out;
 
-	/* Run test program */
-	err = bpf_prog_test_run(pkt_fd, 1, &pkt_v4, sizeof(pkt_v4),
-				buf, &size, &retval, &duration);
-	memcpy(&iph, buf + sizeof(struct ethhdr), sizeof(iph));
-
-	ASSERT_OK(err, "ipv4");
-	ASSERT_EQ(retval, XDP_TX, "ipv4 retval");
-	ASSERT_EQ(size, 74, "ipv4 size");
-	ASSERT_EQ(iph.protocol, IPPROTO_IPIP, "ipv4 proto");
-
-	/* Make sure bpf_xdp_output() was triggered and it sent the expected
-	 * data to the perf ring buffer.
-	 */
-	err = perf_buffer__poll(pb, 100);
-
-	ASSERT_GE(err, 0, "perf_buffer__poll");
-	ASSERT_TRUE(passed, "test passed");
-	/* Verify test results */
-	ASSERT_EQ(ftrace_skel->bss->test_result_fentry, if_nametoindex("lo"),
-		  "fentry result");
-	ASSERT_EQ(ftrace_skel->bss->test_result_fexit, XDP_TX, "fexit result");
+	for (int i = 0; i < ARRAY_SIZE(pkt_sizes); i++)
+		run_xdp_bpf2bpf_pkt_size(pkt_fd, pb, ftrace_skel,
+					 pkt_sizes[i]);
 out:
 	perf_buffer__free(pb);
 	test_xdp__destroy(pkt_skel);