fix/chore: fix LPM trie batch update problem and remove BTF requirement

This commit is contained in:
mzz2017 2023-02-04 13:37:36 +08:00
parent 0d29b6fccc
commit 5d4210b030
5 changed files with 24 additions and 14 deletions

View File

@ -99,8 +99,9 @@ const (
var (
BasicFeatureVersion = internal.Version{5, 2, 0}
// Deprecated: Ftrace does not support arm64 yet (Linux 6.2).
FtraceFeatureVersion = internal.Version{5, 5, 0}
BatchUpdateFeatureVersion = internal.Version{5, 6, 0}
CgSocketCookieFeatureVersion = internal.Version{5, 7, 0}
ChecksumFeatureVersion = internal.Version{5, 8, 0}
FtraceFeatureVersion = internal.Version{5, 5, 0}
UserspaceBatchUpdateFeatureVersion = internal.Version{5, 6, 0}
CgSocketCookieFeatureVersion = internal.Version{5, 7, 0}
ChecksumFeatureVersion = internal.Version{5, 8, 0}
UserspaceBatchUpdateLpmTrieFeatureVersion = internal.Version{5, 13, 0}
)

View File

@ -70,7 +70,10 @@ func cidrToBpfLpmKey(prefix netip.Prefix) _bpfLpmKey {
func BatchUpdate(m *ebpf.Map, keys interface{}, values interface{}, opts *ebpf.BatchOptions) (n int, err error) {
var old bool
version, e := internal.KernelVersion()
if e != nil || version.Less(consts.BatchUpdateFeatureVersion) {
if e != nil || version.Less(consts.UserspaceBatchUpdateFeatureVersion) {
old = true
}
if m.Type() == ebpf.LPMTrie && version.Less(consts.UserspaceBatchUpdateLpmTrieFeatureVersion) {
old = true
}
if !old {

View File

@ -177,6 +177,11 @@ retryLoadBpf:
bpf: &bpf,
kernelVersion: &kernelVersion,
}
defer func() {
if err != nil {
_ = core.Close()
}
}()
// Bind to links. Binding should be advance of dialerGroups to avoid un-routable old connection.
for _, ifname := range lanInterface {

@ -1 +1 @@
Subproject commit 372c3cc61d2d907b89ebdfb7bec180a09cd28169
Subproject commit 69c6c597560e3c0c445b8b0824e9a6e4e7af7dc1

View File

@ -6,14 +6,14 @@
#include "headers/if_ether_defs.h"
#include "headers/pkt_cls_defs.h"
#include "headers/socket_defs.h"
#include "headers/bpf_probe_read.h"
#include "headers/vmlinux.h"
#include <asm-generic/errno-base.h>
#include <bpf/bpf_core_read.h>
// #include <bpf/bpf_core_read.h>
#include <bpf/bpf_endian.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
// #define __DEBUG_ROUTING
// #define __PRINT_ROUTING_RESULT
@ -2044,8 +2044,8 @@ static int __always_inline update_map_elem_by_cookie(const __u64 cookie) {
__builtin_memset(&val, 0, sizeof(struct pid_pname));
char buf[MAX_ARG_SCANNER_BUFFER_SIZE] = {0};
struct task_struct *current = (void *)bpf_get_current_task();
unsigned long arg_start = BPF_CORE_READ(current, mm, arg_start);
unsigned long arg_end = BPF_CORE_READ(current, mm, arg_end);
unsigned long arg_start = BPF_PROBE_READ_KERNEL(current, mm, arg_start);
unsigned long arg_end = BPF_PROBE_READ_KERNEL(current, mm, arg_end);
unsigned long arg_len = arg_end - arg_start;
if (arg_len > MAX_ARG_LEN_TO_PROBE) {
arg_len = MAX_ARG_LEN_TO_PROBE;
@ -2075,7 +2075,8 @@ static int __always_inline update_map_elem_by_cookie(const __u64 cookie) {
} else {
buf[to_read] = 0;
}
if ((ret = bpf_core_read_user(&buf, to_read, arg_start + j))) {
if ((ret = bpf_probe_read_user(&buf, to_read,
(const void *)(arg_start + j)))) {
bpf_printk("failed to read process name: %d", ret);
return ret;
}
@ -2091,12 +2092,12 @@ static int __always_inline update_map_elem_by_cookie(const __u64 cookie) {
if (length_cpy > TASK_COMM_LEN) {
length_cpy = TASK_COMM_LEN;
}
if ((ret = bpf_core_read_user(&val.pname, length_cpy,
arg_start + last_slash))) {
if ((ret = bpf_probe_read_user(&val.pname, length_cpy,
(const void *)(arg_start + last_slash)))) {
bpf_printk("failed to read process name: %d", ret);
return ret;
}
val.pid = BPF_CORE_READ(current, tgid);
bpf_probe_read_kernel(&val.pid, sizeof(val.pid), &current->tgid);
// bpf_printk("a start_end: %lu %lu", arg_start, arg_end);
// bpf_printk("b start_end: %lu %lu", arg_start + last_slash, arg_start + j);