diff --git a/Makefile b/Makefile index 7fba0fd..b8a10e1 100644 --- a/Makefile +++ b/Makefile @@ -6,14 +6,25 @@ # The development version of clang is distributed as the 'clang' binary, # while stable/released versions have a version number attached. # Pin the default clang to a stable version. -CLANG ?= clang +CLANG ?= clang-14 STRIP ?= llvm-strip -CFLAGS := -O2 -g -Wall -Werror $(CFLAGS) +#CFLAGS := -O2 -g -Wall -Werror $(CFLAGS) +CFLAGS := -O2 -Wall -Werror $(CFLAGS) + +# Get version from .git. +date=$(shell git log -1 --format="%cd" --date=short | sed s/-//g) +count=$(shell git rev-list --count HEAD) +commit=$(shell git rev-parse --short HEAD) +ifeq ($(wildcard .git/.),) + version=unstable-0.nogit +else + version=unstable-$(date).r$(count).$(commit) +endif .PHONY: ebpf dae dae: ebpf - go build -ldflags "-s -w" . + go build -ldflags "-s -w -X github.com/v2rayA/dae/cmd.Version=$(version)" . # $BPF_CLANG is used in go:generate invocations. ebpf: export BPF_CLANG := $(CLANG) diff --git a/README.md b/README.md index 615c541..3f66c31 100644 --- a/README.md +++ b/README.md @@ -10,6 +10,9 @@ As a successor of [v2rayA](https://github.com/v2rayA/v2rayA), dae abandoned v2ra ## TODO -1. Dns upstream. Check dns upstream and source loop (whether upstream is also a client of us) and remind user to add source rule. +1. Check dns upstream and source loop (whether upstream is also a client of us) and remind the user to add sip rule. 1. Domain routing performance optimization. +1. Support not operator for RoutingA rule. +1. DisableL4Checksum by link. +1. Config file. 1. ... diff --git a/cmd/cmd.go b/cmd/cmd.go new file mode 100644 index 0000000..5b699d6 --- /dev/null +++ b/cmd/cmd.go @@ -0,0 +1,113 @@ +package cmd + +import ( + "github.com/sirupsen/logrus" + "github.com/spf13/cobra" + "github.com/spf13/viper" + "github.com/v2rayA/dae/common/consts" + "github.com/v2rayA/dae/component/control" + "github.com/v2rayA/dae/component/outbound" + "github.com/v2rayA/dae/component/outbound/dialer" + "github.com/v2rayA/dae/pkg/logger" + "os" + "os/signal" + "syscall" +) + +var ( + v *viper.Viper + Version = "unknown" + verbose int + rootCmd = &cobra.Command{ + Use: "dae [flags] [command [argument ...]]", + Short: "dae is a lightweight and high-performance transparent proxy solution.", + Long: `dae is a lightweight and high-performance transparent proxy solution.`, + Version: Version, + Run: func(cmd *cobra.Command, args []string) { + const ( + tproxyPort = 12345 + ifname = "docker0" + ) + logrus.SetLevel(logrus.DebugLevel) + log := logger.NewLogger(2) + log.Println("Running") + + d, err := dialer.NewFromLink(log, "socks5://localhost:1080#proxy") + if err != nil { + panic(err) + } + group := outbound.NewDialerGroup(log, "proxy", + []*dialer.Dialer{d}, + outbound.DialerSelectionPolicy{ + Policy: consts.DialerSelectionPolicy_MinAverage10Latencies, + }) + t, err := control.NewControlPlane(log, []*outbound.DialerGroup{group}, ` +#sip(172.17.0.2)->proxy +#mac("02:42:ac:11:00:02")->block +#ipversion(4)->proxy +#l4proto(tcp)->proxy +#ip(119.29.29.29) -> proxy +#ip(223.5.5.5) -> direct +ip(geoip:cn) -> direct + +domain(full:google.com) && port(443) && l4proto(tcp) -> proxy + +domain(geosite:cn, suffix:"ip.sb") -> direct +#ip("91.105.192.0/23","91.108.4.0/22","91.108.8.0/21","91.108.16.0/21","91.108.56.0/22","95.161.64.0/20","149.154.160.0/20","185.76.151.0/24")->proxy +#domain(geosite:category-scholar-!cn, geosite:category-scholar-cn)->direct +final: proxy +`) + if err != nil { + panic(err) + } + if err = t.BindLink(ifname); 
err != nil {
+			panic(err)
+		}
+		sigs := make(chan os.Signal, 1)
+		signal.Notify(sigs, syscall.SIGINT, syscall.SIGTERM, syscall.SIGHUP, syscall.SIGQUIT, syscall.SIGKILL, syscall.SIGILL)
+		go func() {
+			if err := t.ListenAndServe(tproxyPort); err != nil {
+				log.Errorln("ListenAndServe:", err)
+				sigs <- nil
+			}
+		}()
+		<-sigs
+		if e := t.Close(); e != nil {
+			log.Errorln("Close control plane:", e)
+		}
+	},
+	}
+)
+
+// Execute executes the root command.
+func Execute() error {
+	return rootCmd.Execute()
+}
+
+func init() {
+	rootCmd.PersistentFlags().CountVarP(&verbose, "verbose", "v", "verbose (-v or -vv)")
+
+	rootCmd.PersistentFlags().StringP("node", "n", "", "node share-link of your modern proxy")
+	rootCmd.PersistentFlags().StringP("subscription", "s", "", "subscription-link of your modern proxy")
+	rootCmd.PersistentFlags().Bool("noudp", false, "do not redirect UDP traffic, even though the proxy server supports it")
+	rootCmd.PersistentFlags().String("testnode", "true", "test the connectivity before connecting to the node")
+	rootCmd.PersistentFlags().Bool("select", false, "manually select the node to connect from the subscription")
+	//rootCmd.AddCommand(configCmd)
+}
+
+func NewLogger(verbose int) *logrus.Logger {
+	log := logrus.New()
+
+	var level logrus.Level
+	switch verbose {
+	case 0:
+		level = logrus.WarnLevel
+	case 1:
+		level = logrus.InfoLevel
+	default:
+		level = logrus.TraceLevel
+	}
+	log.SetLevel(level)
+
+	return log
+}
diff --git a/cmd/infra/su.go b/cmd/infra/su.go
new file mode 100644
index 0000000..a59ed57
--- /dev/null
+++ b/cmd/infra/su.go
@@ -0,0 +1,43 @@
+package infra
+
+import (
+	"fmt"
+	"github.com/sirupsen/logrus"
+	"os"
+	"os/exec"
+	"path/filepath"
+)
+
+func AutoSu() {
+	if os.Getuid() == 0 {
+		return
+	}
+	program := filepath.Base(os.Args[0])
+	pathSudo, err := exec.LookPath("sudo")
+	if err != nil {
+		// skip
+		return
+	}
+	// https://github.com/WireGuard/wireguard-tools/blob/71799a8f6d1450b63071a21cad6ed434b348d3d5/src/wg-quick/linux.bash#L85
+	p, err := os.StartProcess(pathSudo, append([]string{
+		pathSudo,
+		"-E",
+		"-p",
+		fmt.Sprintf("%v must be run as root. 
Please enter the password for %%u to continue: ", program), + "--", + }, os.Args...), &os.ProcAttr{ + Files: []*os.File{ + os.Stdin, + os.Stdout, + os.Stderr, + }, + }) + if err != nil { + logrus.Fatal(err) + } + stat, err := p.Wait() + if err != nil { + os.Exit(1) + } + os.Exit(stat.ExitCode()) +} \ No newline at end of file diff --git a/common/consts/ebpf.go b/common/consts/ebpf.go index f88af7b..12862fe 100644 --- a/common/consts/ebpf.go +++ b/common/consts/ebpf.go @@ -20,8 +20,6 @@ const ( BigEndianTproxyPortKey DisableL4TxChecksumKey DisableL4RxChecksumKey - EpochKey - RoutingsLenKey ) type DisableL4ChecksumPolicy uint32 @@ -50,6 +48,7 @@ type OutboundIndex uint8 const ( OutboundDirect OutboundIndex = 0 + OutboundBlock OutboundIndex = 1 OutboundControlPlaneDirect OutboundIndex = 0xFE OutboundLogicalAnd OutboundIndex = 0xFF ) @@ -58,6 +57,8 @@ func (i OutboundIndex) String() string { switch i { case OutboundDirect: return "direct" + case OutboundBlock: + return "block" case OutboundControlPlaneDirect: return "" case OutboundLogicalAnd: diff --git a/common/consts/routing.go b/common/consts/routing.go index 19a67eb..f66901b 100644 --- a/common/consts/routing.go +++ b/common/consts/routing.go @@ -19,4 +19,6 @@ const ( Function_Mac = "mac" Function_L4Proto = "l4proto" Function_IpVersion = "ipversion" + + Declaration_Final = "final" ) diff --git a/common/debug.go b/common/debug.go new file mode 100644 index 0000000..63b0f0e --- /dev/null +++ b/common/debug.go @@ -0,0 +1,28 @@ +/* + * SPDX-License-Identifier: AGPL-3.0-only + * Copyright (c) since 2023, mzz2017 (mzz@tuta.io). All rights reserved. + */ + +package common + +import ( + "github.com/sirupsen/logrus" + "os" + "path/filepath" + "strconv" + "strings" +) + +func ReportMemory(tag string) { + if !logrus.IsLevelEnabled(logrus.DebugLevel) { + return + } + b, err := os.ReadFile(filepath.Join("/proc", strconv.Itoa(os.Getpid()), "status")) + if err != nil { + panic(err) + } + str := strings.TrimSpace(string(b)) + _, after, _ := strings.Cut(str, "VmHWM:") + usage, _, _ := strings.Cut(after, "\n") + logrus.Debugln(tag+": memory usage:", strings.TrimSpace(usage)) +} diff --git a/component/control/utils.go b/component/control/bpf_utils.go similarity index 94% rename from component/control/utils.go rename to component/control/bpf_utils.go index 8012444..83b05c1 100644 --- a/component/control/utils.go +++ b/component/control/bpf_utils.go @@ -16,7 +16,7 @@ type bpfLpmKey struct { Data [4]uint32 } -func (o *bpfObjects) NewLpmMap(keys []bpfLpmKey, values []uint32) (m *ebpf.Map, err error) { +func (o *bpfObjects) newLpmMap(keys []bpfLpmKey, values []uint32) (m *ebpf.Map, err error) { m, err = ebpf.NewMap(&ebpf.MapSpec{ Type: ebpf.LPMTrie, Flags: o.UnusedLpmType.Flags(), diff --git a/component/control/control.go b/component/control/control.go index da5c89e..c76693b 100644 --- a/component/control/control.go +++ b/component/control/control.go @@ -6,4 +6,4 @@ package control // $BPF_CLANG and $BPF_CFLAGS are set by the Makefile. 
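// Editorial sketch (not part of the patch): how an LPM-trie map like the one
// built by newLpmMap above can be created and populated from userspace with
// cilium/ebpf. The key mirrors bpfLpmKey (a 32-bit prefix length followed by
// 16 bytes of address data); the map sizes and the example key are arbitrary
// placeholders, and the real byte packing of IPv4/IPv6 addresses is defined by
// dae's own helpers, not reproduced here. Creating BPF maps requires root (or
// CAP_BPF).
package main

import (
	"log"

	"github.com/cilium/ebpf"
	"golang.org/x/sys/unix"
)

type lpmKey struct {
	PrefixLen uint32
	Data      [4]uint32
}

func main() {
	// LPM_TRIE maps must be created with BPF_F_NO_PREALLOC.
	m, err := ebpf.NewMap(&ebpf.MapSpec{
		Type:       ebpf.LPMTrie,
		KeySize:    4 + 16, // PrefixLen + Data
		ValueSize:  4,      // e.g. an outbound id
		MaxEntries: 1024,
		Flags:      unix.BPF_F_NO_PREALLOC,
	})
	if err != nil {
		log.Fatalln("create LPM trie:", err)
	}
	defer m.Close()

	// A placeholder /128 entry mapped to value 1.
	k := lpmKey{PrefixLen: 128}
	if err := m.Update(k, uint32(1), ebpf.UpdateAny); err != nil {
		log.Fatalln("update LPM trie:", err)
	}
}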
-//go:generate go run github.com/cilium/ebpf/cmd/bpf2go -cc $BPF_CLANG -strip $BPF_STRIP -cflags $BPF_CFLAGS bpf kern/tproxy.c -- -I../headers +//go:generate go run github.com/cilium/ebpf/cmd/bpf2go -cc $BPF_CLANG -strip $BPF_STRIP -cflags $BPF_CFLAGS bpf kern/tproxy.c -- diff --git a/component/control/control_plane.go b/component/control/control_plane.go index db4bdfc..cecebfa 100644 --- a/component/control/control_plane.go +++ b/component/control/control_plane.go @@ -42,31 +42,31 @@ type ControlPlane struct { Final string // mutex protects the dnsCache. - mutex sync.Mutex - dnsCache map[string]*dnsCache - // Deprecated - epoch uint32 + mutex sync.Mutex + dnsCache map[string]*dnsCache dnsUpstream netip.AddrPort deferFuncs []func() error } -func NewControlPlane(log *logrus.Logger, routingA string) (*ControlPlane, error) { +func NewControlPlane(log *logrus.Logger, dialerGroups []*outbound.DialerGroup, routingA string) (*ControlPlane, error) { // Allow the current process to lock memory for eBPF resources. if err := rlimit.RemoveMemlock(); err != nil { return nil, fmt.Errorf("rlimit.RemoveMemlock:%v", err) } pinPath := filepath.Join(consts.BpfPinRoot, consts.AppName) os.MkdirAll(pinPath, 0755) + // Load pre-compiled programs and maps into the kernel. var bpf bpfObjects -retry_load: +retryLoadBpf: if err := loadBpfObjects(&bpf, &ebpf.CollectionOptions{ Maps: ebpf.MapOptions{ PinPath: pinPath, }, }); err != nil { if errors.Is(err, ebpf.ErrMapIncompatible) { + // Map property is incompatible. Remove the old map and try again. prefix := "use pinned map " _, after, ok := strings.Cut(err.Error(), prefix) if !ok { @@ -75,72 +75,35 @@ retry_load: mapName, _, _ := strings.Cut(after, ":") _ = os.Remove(filepath.Join(pinPath, mapName)) log.Warnf("New map format was incompatible with existing map %v, and the old one was removed.", mapName) - goto retry_load + goto retryLoadBpf } return nil, fmt.Errorf("loading objects: %w", err) } - // Flush dst_map. - //_ = os.Remove(filepath.Join(pinPath, "dst_map")) - //if err := bpf.ParamMap.Update(consts.IpsLenKey, uint32(1), ebpf.UpdateAny); err != nil { - // return nil, err - //} - //if err := bpf.ParamMap.Update(consts.BigEndianTproxyPortKey, uint32(swap16(tproxyPort)), ebpf.UpdateAny); err != nil { - // return nil, err - //} + // Write params. 
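// Editorial sketch (not part of the patch): the retryLoadBpf logic above,
// factored into a standalone helper. "load" stands in for the generated
// loadBpfObjects; the map-name extraction follows the same
// "use pinned map <name>: ..." error-text heuristic the control plane uses.
package main

import (
	"errors"
	"log"
	"os"
	"path/filepath"
	"strings"

	"github.com/cilium/ebpf"
)

// loadWithPinRecovery loads BPF objects against a pin directory and, when an
// existing pinned map is incompatible with the new map definition, removes
// the stale pin and retries so a fresh map gets created and pinned.
func loadWithPinRecovery(pinPath string, load func(*ebpf.CollectionOptions) error) error {
	opts := &ebpf.CollectionOptions{Maps: ebpf.MapOptions{PinPath: pinPath}}
	for {
		err := load(opts)
		if err == nil {
			return nil
		}
		if !errors.Is(err, ebpf.ErrMapIncompatible) {
			return err
		}
		// The error text names the offending map: "use pinned map <name>: ...".
		_, after, ok := strings.Cut(err.Error(), "use pinned map ")
		if !ok {
			return err
		}
		mapName, _, _ := strings.Cut(after, ":")
		if rmErr := os.Remove(filepath.Join(pinPath, strings.TrimSpace(mapName))); rmErr != nil {
			return err
		}
	}
}

func main() {
	// Stub loader so the sketch compiles; a real caller would wrap the
	// generated loadBpfObjects in a closure here.
	if err := loadWithPinRecovery("/sys/fs/bpf/dae", func(opts *ebpf.CollectionOptions) error { return nil }); err != nil {
		log.Fatalln(err)
	}
}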
if err := bpf.ParamMap.Update(consts.DisableL4TxChecksumKey, consts.DisableL4ChecksumPolicy_SetZero, ebpf.UpdateAny); err != nil { return nil, err } if err := bpf.ParamMap.Update(consts.DisableL4RxChecksumKey, consts.DisableL4ChecksumPolicy_SetZero, ebpf.UpdateAny); err != nil { return nil, err } - //var epoch uint32 - //bpf.ParamMap.Lookup(consts.EpochKey, &epoch) - //epoch++ - //if err := bpf.ParamMap.Update(consts.EpochKey, epoch, ebpf.UpdateAny); err != nil { - // return nil, err - //} - //if err := bpf.ParamMap.Update(consts.InterfaceIpParamOff, binary.LittleEndian.Uint32([]byte{172, 17, 0, 1}), ebpf.UpdateAny); err != nil { // 172.17.0.1 - // return nil, err - //} - //if err := bpf.ParamMap.Update(InterfaceIpParamOff+1, binary.LittleEndian.Uint32([]byte{10, 249, 40, 166}), ebpf.UpdateAny); err != nil { // 10.249.40.166 - // log.Println(err) - // return - //} - //if err := bpf.ParamMap.Update(InterfaceIpParamOff+2, binary.LittleEndian.Uint32([]byte{10, 250, 52, 180}), ebpf.UpdateAny); err != nil { // 10.250.52.180 - // log.Println(err) - // return - //} - cfDnsAddr := netip.AddrFrom4([4]byte{1, 1, 1, 1}) - cfDnsAddr16 := cfDnsAddr.As16() - cfDnsPort := uint16(53) - if err := bpf.DnsUpstreamMap.Update(consts.ZeroKey, bpfIpPort{ - Ip: common.Ipv6ByteSliceToUint32Array(cfDnsAddr16[:]), - Port: swap16(cfDnsPort), - }, ebpf.UpdateAny); err != nil { - return nil, err - } - - /**/ - // TODO: - d, err := dialer.NewFromLink("socks5://localhost:1080#proxy") - if err != nil { - return nil, err - } + // DialerGroups (outbounds). outbounds := []*outbound.DialerGroup{ outbound.NewDialerGroup(log, consts.OutboundDirect.String(), - []*dialer.Dialer{dialer.FullconeDirectDialer}, + []*dialer.Dialer{dialer.NewDirectDialer(log, true)}, outbound.DialerSelectionPolicy{ Policy: consts.DialerSelectionPolicy_Fixed, FixedIndex: 0, }), - outbound.NewDialerGroup(log, "proxy", - []*dialer.Dialer{d}, + outbound.NewDialerGroup(log, consts.OutboundBlock.String(), + []*dialer.Dialer{dialer.NewBlockDialer(log)}, outbound.DialerSelectionPolicy{ - Policy: consts.DialerSelectionPolicy_MinAverage10Latencies, + Policy: consts.DialerSelectionPolicy_Fixed, + FixedIndex: 0, }), } + outbounds = append(outbounds, dialerGroups...) // Generate outboundName2Id from outbounds. if len(outbounds) > 0xff { return nil, fmt.Errorf("too many outbounds") @@ -177,7 +140,17 @@ retry_load: if err := builder.Build(); err != nil { return nil, fmt.Errorf("RoutingMatcherBuilder.Build: %w", err) } - /**/ + + // DNS upstream. + cfDnsAddr := netip.AddrFrom4([4]byte{1, 1, 1, 1}) + cfDnsAddr16 := cfDnsAddr.As16() + cfDnsPort := uint16(53) + if err := bpf.DnsUpstreamMap.Update(consts.ZeroKey, bpfIpPort{ + Ip: common.Ipv6ByteSliceToUint32Array(cfDnsAddr16[:]), + Port: swap16(cfDnsPort), + }, ebpf.UpdateAny); err != nil { + return nil, err + } return &ControlPlane{ log: log, @@ -190,8 +163,7 @@ retry_load: mutex: sync.Mutex{}, dnsCache: make(map[string]*dnsCache), dnsUpstream: netip.AddrPortFrom(cfDnsAddr, cfDnsPort), - //epoch: epoch, - deferFuncs: []func() error{bpf.Close}, + deferFuncs: []func() error{bpf.Close}, }, nil } @@ -235,9 +207,26 @@ func (c *ControlPlane) BindLink(ifname string) error { break } } - if err := c.bpf.IfindexIpMap.Update(uint32(link.Attrs().Index), linkIp, ebpf.UpdateAny); err != nil { + if err := c.bpf.IfindexTproxyIpMap.Update(uint32(link.Attrs().Index), linkIp, ebpf.UpdateAny); err != nil { return fmt.Errorf("update IfindexIpsMap: %w", err) } + // FIXME: not only this link ip. 
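// Editorial sketch (not part of the patch): DnsUpstreamMap above stores the
// upstream port via swap16, which is assumed to put the value into network
// (big-endian) byte order on little-endian hosts -- the userspace counterpart
// of bpf_htons() from bpf_endian.h below, so the eBPF program can compare it
// against raw packet fields directly.
package main

import "fmt"

// swapBytes16 swaps the two bytes of a 16-bit value; on a little-endian CPU
// this converts host order to network order.
func swapBytes16(v uint16) uint16 {
	return v<<8 | v>>8
}

func main() {
	fmt.Printf("port 53 in network byte order: %#04x\n", swapBytes16(53)) // 0x3500
}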
+ if linkIp.HasIp4 { + if err := c.bpf.HostIpLpm.Update(bpfLpmKey{ + PrefixLen: 128, + Data: linkIp.Ip4, + }, uint32(1), ebpf.UpdateAny); err != nil { + return fmt.Errorf("update IfindexIpsMap: %w", err) + } + } + if linkIp.HasIp6 { + if err := c.bpf.HostIpLpm.Update(bpfLpmKey{ + PrefixLen: 128, + Data: linkIp.Ip6, + }, uint32(1), ebpf.UpdateAny); err != nil { + return fmt.Errorf("update IfindexIpsMap: %w", err) + } + } // Insert qdisc and filters. qdisc := &netlink.GenericQdisc{ diff --git a/component/control/dns.go b/component/control/dns.go index f9e1afc..0ad1348 100644 --- a/component/control/dns.go +++ b/component/control/dns.go @@ -7,9 +7,9 @@ package control import ( "fmt" + "github.com/cilium/ebpf" "github.com/v2rayA/dae/common" "github.com/v2rayA/dae/common/consts" - "github.com/cilium/ebpf" "golang.org/x/net/dns/dnsmessage" "net/netip" "strings" @@ -53,7 +53,6 @@ func (c *ControlPlane) BatchUpdateDomainRouting(cache *dnsCache) error { keys = append(keys, common.Ipv6ByteSliceToUint32Array(ip6[:])) vals = append(vals, bpfDomainRouting{ Bitmap: cache.DomainBitmap, - Epoch: c.epoch, }) } if _, err := c.bpf.DomainRoutingMap.BatchUpdate(keys, vals, &ebpf.BatchOptions{ diff --git a/component/control/match.go b/component/control/domain_match.go similarity index 95% rename from component/control/match.go rename to component/control/domain_match.go index f50d7ed..c36ff52 100644 --- a/component/control/match.go +++ b/component/control/domain_match.go @@ -12,7 +12,7 @@ import ( ) func (c *ControlPlane) MatchDomainBitmap(domain string) (bitmap [consts.MaxRoutingLen / 32]uint32) { - // FIXME: high performance implementation. + // TODO: high performance implementation. for _, s := range c.SimulatedDomainSet { for _, d := range s.Domains { var hit bool diff --git a/component/control/kern/bpf_endian.h b/component/control/kern/bpf_endian.h new file mode 100644 index 0000000..3262354 --- /dev/null +++ b/component/control/kern/bpf_endian.h @@ -0,0 +1,99 @@ +/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */ +#ifndef __BPF_ENDIAN__ +#define __BPF_ENDIAN__ + +/* + * Isolate byte #n and put it into byte #m, for __u##b type. + * E.g., moving byte #6 (nnnnnnnn) into byte #1 (mmmmmmmm) for __u64: + * 1) xxxxxxxx nnnnnnnn xxxxxxxx xxxxxxxx xxxxxxxx xxxxxxxx mmmmmmmm xxxxxxxx + * 2) nnnnnnnn xxxxxxxx xxxxxxxx xxxxxxxx xxxxxxxx mmmmmmmm xxxxxxxx 00000000 + * 3) 00000000 00000000 00000000 00000000 00000000 00000000 00000000 nnnnnnnn + * 4) 00000000 00000000 00000000 00000000 00000000 00000000 nnnnnnnn 00000000 + */ +#define ___bpf_mvb(x, b, n, m) ((__u##b)(x) << (b-(n+1)*8) >> (b-8) << (m*8)) + +#define ___bpf_swab16(x) ((__u16)( \ + ___bpf_mvb(x, 16, 0, 1) | \ + ___bpf_mvb(x, 16, 1, 0))) + +#define ___bpf_swab32(x) ((__u32)( \ + ___bpf_mvb(x, 32, 0, 3) | \ + ___bpf_mvb(x, 32, 1, 2) | \ + ___bpf_mvb(x, 32, 2, 1) | \ + ___bpf_mvb(x, 32, 3, 0))) + +#define ___bpf_swab64(x) ((__u64)( \ + ___bpf_mvb(x, 64, 0, 7) | \ + ___bpf_mvb(x, 64, 1, 6) | \ + ___bpf_mvb(x, 64, 2, 5) | \ + ___bpf_mvb(x, 64, 3, 4) | \ + ___bpf_mvb(x, 64, 4, 3) | \ + ___bpf_mvb(x, 64, 5, 2) | \ + ___bpf_mvb(x, 64, 6, 1) | \ + ___bpf_mvb(x, 64, 7, 0))) + +/* LLVM's BPF target selects the endianness of the CPU + * it compiles on, or the user specifies (bpfel/bpfeb), + * respectively. The used __BYTE_ORDER__ is defined by + * the compiler, we cannot rely on __BYTE_ORDER from + * libc headers, since it doesn't reflect the actual + * requested byte order. + * + * Note, LLVM's BPF target has different __builtin_bswapX() + * semantics. 
It does map to BPF_ALU | BPF_END | BPF_TO_BE + * in bpfel and bpfeb case, which means below, that we map + * to cpu_to_be16(). We could use it unconditionally in BPF + * case, but better not rely on it, so that this header here + * can be used from application and BPF program side, which + * use different targets. + */ +#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ +# define __bpf_ntohs(x) __builtin_bswap16(x) +# define __bpf_htons(x) __builtin_bswap16(x) +# define __bpf_constant_ntohs(x) ___bpf_swab16(x) +# define __bpf_constant_htons(x) ___bpf_swab16(x) +# define __bpf_ntohl(x) __builtin_bswap32(x) +# define __bpf_htonl(x) __builtin_bswap32(x) +# define __bpf_constant_ntohl(x) ___bpf_swab32(x) +# define __bpf_constant_htonl(x) ___bpf_swab32(x) +# define __bpf_be64_to_cpu(x) __builtin_bswap64(x) +# define __bpf_cpu_to_be64(x) __builtin_bswap64(x) +# define __bpf_constant_be64_to_cpu(x) ___bpf_swab64(x) +# define __bpf_constant_cpu_to_be64(x) ___bpf_swab64(x) +#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ +# define __bpf_ntohs(x) (x) +# define __bpf_htons(x) (x) +# define __bpf_constant_ntohs(x) (x) +# define __bpf_constant_htons(x) (x) +# define __bpf_ntohl(x) (x) +# define __bpf_htonl(x) (x) +# define __bpf_constant_ntohl(x) (x) +# define __bpf_constant_htonl(x) (x) +# define __bpf_be64_to_cpu(x) (x) +# define __bpf_cpu_to_be64(x) (x) +# define __bpf_constant_be64_to_cpu(x) (x) +# define __bpf_constant_cpu_to_be64(x) (x) +#else +# error "Fix your compiler's __BYTE_ORDER__?!" +#endif + +#define bpf_htons(x) \ + (__builtin_constant_p(x) ? \ + __bpf_constant_htons(x) : __bpf_htons(x)) +#define bpf_ntohs(x) \ + (__builtin_constant_p(x) ? \ + __bpf_constant_ntohs(x) : __bpf_ntohs(x)) +#define bpf_htonl(x) \ + (__builtin_constant_p(x) ? \ + __bpf_constant_htonl(x) : __bpf_htonl(x)) +#define bpf_ntohl(x) \ + (__builtin_constant_p(x) ? \ + __bpf_constant_ntohl(x) : __bpf_ntohl(x)) +#define bpf_cpu_to_be64(x) \ + (__builtin_constant_p(x) ? \ + __bpf_constant_cpu_to_be64(x) : __bpf_cpu_to_be64(x)) +#define bpf_be64_to_cpu(x) \ + (__builtin_constant_p(x) ? \ + __bpf_constant_be64_to_cpu(x) : __bpf_be64_to_cpu(x)) + +#endif /* __BPF_ENDIAN__ */ \ No newline at end of file diff --git a/component/control/kern/bpf_helper_defs.h b/component/control/kern/bpf_helper_defs.h new file mode 100644 index 0000000..04f350f --- /dev/null +++ b/component/control/kern/bpf_helper_defs.h @@ -0,0 +1,4139 @@ +// Copied from https://github.com/cilium/ebpf/blob/e0ada270a5/examples/headers/bpf_helper_defs.h +/* This is auto-generated file. See bpf_doc.py for details. */ + +/* Forward declarations of BPF structs */ +struct bpf_fib_lookup; +struct bpf_sk_lookup; +struct bpf_perf_event_data; +struct bpf_perf_event_value; +struct bpf_pidns_info; +struct bpf_redir_neigh; +struct bpf_sock; +struct bpf_sock_addr; +struct bpf_sock_ops; +struct bpf_sock_tuple; +struct bpf_spin_lock; +struct bpf_sysctl; +struct bpf_tcp_sock; +struct bpf_tunnel_key; +struct bpf_xfrm_state; +struct linux_binprm; +struct pt_regs; +struct sk_reuseport_md; +struct sockaddr; +struct tcphdr; +struct seq_file; +struct tcp6_sock; +struct tcp_sock; +struct tcp_timewait_sock; +struct tcp_request_sock; +struct udp6_sock; +struct unix_sock; +struct task_struct; +struct __sk_buff; +struct sk_msg_md; +struct xdp_md; +struct path; +struct btf_ptr; +struct inode; +struct socket; +struct file; +struct bpf_timer; + +/* + * bpf_map_lookup_elem + * + * Perform a lookup in *map* for an entry associated to *key*. 
+ * + * Returns + * Map value associated to *key*, or **NULL** if no entry was + * found. + */ +static void *(*bpf_map_lookup_elem)(void *map, const void *key) = (void *) 1; + +/* + * bpf_map_update_elem + * + * Add or update the value of the entry associated to *key* in + * *map* with *value*. *flags* is one of: + * + * **BPF_NOEXIST** + * The entry for *key* must not exist in the map. + * **BPF_EXIST** + * The entry for *key* must already exist in the map. + * **BPF_ANY** + * No condition on the existence of the entry for *key*. + * + * Flag value **BPF_NOEXIST** cannot be used for maps of types + * **BPF_MAP_TYPE_ARRAY** or **BPF_MAP_TYPE_PERCPU_ARRAY** (all + * elements always exist), the helper would return an error. + * + * Returns + * 0 on success, or a negative error in case of failure. + */ +static long (*bpf_map_update_elem)(void *map, const void *key, const void *value, __u64 flags) = (void *) 2; + +/* + * bpf_map_delete_elem + * + * Delete entry with *key* from *map*. + * + * Returns + * 0 on success, or a negative error in case of failure. + */ +static long (*bpf_map_delete_elem)(void *map, const void *key) = (void *) 3; + +/* + * bpf_probe_read + * + * For tracing programs, safely attempt to read *size* bytes from + * kernel space address *unsafe_ptr* and store the data in *dst*. + * + * Generally, use **bpf_probe_read_user**\ () or + * **bpf_probe_read_kernel**\ () instead. + * + * Returns + * 0 on success, or a negative error in case of failure. + */ +static long (*bpf_probe_read)(void *dst, __u32 size, const void *unsafe_ptr) = (void *) 4; + +/* + * bpf_ktime_get_ns + * + * Return the time elapsed since system boot, in nanoseconds. + * Does not include time the system was suspended. + * See: **clock_gettime**\ (**CLOCK_MONOTONIC**) + * + * Returns + * Current *ktime*. + */ +static __u64 (*bpf_ktime_get_ns)(void) = (void *) 5; + +/* + * bpf_trace_printk + * + * This helper is a "printk()-like" facility for debugging. It + * prints a message defined by format *fmt* (of size *fmt_size*) + * to file *\/sys/kernel/debug/tracing/trace* from DebugFS, if + * available. It can take up to three additional **u64** + * arguments (as an eBPF helpers, the total number of arguments is + * limited to five). + * + * Each time the helper is called, it appends a line to the trace. + * Lines are discarded while *\/sys/kernel/debug/tracing/trace* is + * open, use *\/sys/kernel/debug/tracing/trace_pipe* to avoid this. + * The format of the trace is customizable, and the exact output + * one will get depends on the options set in + * *\/sys/kernel/debug/tracing/trace_options* (see also the + * *README* file under the same directory). However, it usually + * defaults to something like: + * + * :: + * + * telnet-470 [001] .N.. 419421.045894: 0x00000001: + * + * In the above: + * + * * ``telnet`` is the name of the current task. + * * ``470`` is the PID of the current task. + * * ``001`` is the CPU number on which the task is + * running. + * * In ``.N..``, each character refers to a set of + * options (whether irqs are enabled, scheduling + * options, whether hard/softirqs are running, level of + * preempt_disabled respectively). **N** means that + * **TIF_NEED_RESCHED** and **PREEMPT_NEED_RESCHED** + * are set. + * * ``419421.045894`` is a timestamp. + * * ``0x00000001`` is a fake value used by BPF for the + * instruction pointer register. + * * ```` is the message formatted with + * *fmt*. + * + * The conversion specifiers supported by *fmt* are similar, but + * more limited than for printk(). 
They are **%d**, **%i**, + * **%u**, **%x**, **%ld**, **%li**, **%lu**, **%lx**, **%lld**, + * **%lli**, **%llu**, **%llx**, **%p**, **%s**. No modifier (size + * of field, padding with zeroes, etc.) is available, and the + * helper will return **-EINVAL** (but print nothing) if it + * encounters an unknown specifier. + * + * Also, note that **bpf_trace_printk**\ () is slow, and should + * only be used for debugging purposes. For this reason, a notice + * block (spanning several lines) is printed to kernel logs and + * states that the helper should not be used "for production use" + * the first time this helper is used (or more precisely, when + * **trace_printk**\ () buffers are allocated). For passing values + * to user space, perf events should be preferred. + * + * Returns + * The number of bytes written to the buffer, or a negative error + * in case of failure. + */ +static long (*bpf_trace_printk)(const char *fmt, __u32 fmt_size, ...) = (void *) 6; + +/* + * bpf_get_prandom_u32 + * + * Get a pseudo-random number. + * + * From a security point of view, this helper uses its own + * pseudo-random internal state, and cannot be used to infer the + * seed of other random functions in the kernel. However, it is + * essential to note that the generator used by the helper is not + * cryptographically secure. + * + * Returns + * A random 32-bit unsigned value. + */ +static __u32 (*bpf_get_prandom_u32)(void) = (void *) 7; + +/* + * bpf_get_smp_processor_id + * + * Get the SMP (symmetric multiprocessing) processor id. Note that + * all programs run with migration disabled, which means that the + * SMP processor id is stable during all the execution of the + * program. + * + * Returns + * The SMP id of the processor running the program. + */ +static __u32 (*bpf_get_smp_processor_id)(void) = (void *) 8; + +/* + * bpf_skb_store_bytes + * + * Store *len* bytes from address *from* into the packet + * associated to *skb*, at *offset*. *flags* are a combination of + * **BPF_F_RECOMPUTE_CSUM** (automatically recompute the + * checksum for the packet after storing the bytes) and + * **BPF_F_INVALIDATE_HASH** (set *skb*\ **->hash**, *skb*\ + * **->swhash** and *skb*\ **->l4hash** to 0). + * + * A call to this helper is susceptible to change the underlying + * packet buffer. Therefore, at load time, all checks on pointers + * previously done by the verifier are invalidated and must be + * performed again, if the helper is used in combination with + * direct packet access. + * + * Returns + * 0 on success, or a negative error in case of failure. + */ +static long (*bpf_skb_store_bytes)(struct __sk_buff *skb, __u32 offset, const void *from, __u32 len, __u64 flags) = (void *) 9; + +/* + * bpf_l3_csum_replace + * + * Recompute the layer 3 (e.g. IP) checksum for the packet + * associated to *skb*. Computation is incremental, so the helper + * must know the former value of the header field that was + * modified (*from*), the new value of this field (*to*), and the + * number of bytes (2 or 4) for this field, stored in *size*. + * Alternatively, it is possible to store the difference between + * the previous and the new values of the header field in *to*, by + * setting *from* and *size* to 0. For both methods, *offset* + * indicates the location of the IP checksum within the packet. + * + * This helper works in combination with **bpf_csum_diff**\ (), + * which does not update the checksum in-place, but offers more + * flexibility and can handle sizes larger than 2 or 4 for the + * checksum to update. 
+ * + * A call to this helper is susceptible to change the underlying + * packet buffer. Therefore, at load time, all checks on pointers + * previously done by the verifier are invalidated and must be + * performed again, if the helper is used in combination with + * direct packet access. + * + * Returns + * 0 on success, or a negative error in case of failure. + */ +static long (*bpf_l3_csum_replace)(struct __sk_buff *skb, __u32 offset, __u64 from, __u64 to, __u64 size) = (void *) 10; + +/* + * bpf_l4_csum_replace + * + * Recompute the layer 4 (e.g. TCP, UDP or ICMP) checksum for the + * packet associated to *skb*. Computation is incremental, so the + * helper must know the former value of the header field that was + * modified (*from*), the new value of this field (*to*), and the + * number of bytes (2 or 4) for this field, stored on the lowest + * four bits of *flags*. Alternatively, it is possible to store + * the difference between the previous and the new values of the + * header field in *to*, by setting *from* and the four lowest + * bits of *flags* to 0. For both methods, *offset* indicates the + * location of the IP checksum within the packet. In addition to + * the size of the field, *flags* can be added (bitwise OR) actual + * flags. With **BPF_F_MARK_MANGLED_0**, a null checksum is left + * untouched (unless **BPF_F_MARK_ENFORCE** is added as well), and + * for updates resulting in a null checksum the value is set to + * **CSUM_MANGLED_0** instead. Flag **BPF_F_PSEUDO_HDR** indicates + * the checksum is to be computed against a pseudo-header. + * + * This helper works in combination with **bpf_csum_diff**\ (), + * which does not update the checksum in-place, but offers more + * flexibility and can handle sizes larger than 2 or 4 for the + * checksum to update. + * + * A call to this helper is susceptible to change the underlying + * packet buffer. Therefore, at load time, all checks on pointers + * previously done by the verifier are invalidated and must be + * performed again, if the helper is used in combination with + * direct packet access. + * + * Returns + * 0 on success, or a negative error in case of failure. + */ +static long (*bpf_l4_csum_replace)(struct __sk_buff *skb, __u32 offset, __u64 from, __u64 to, __u64 flags) = (void *) 11; + +/* + * bpf_tail_call + * + * This special helper is used to trigger a "tail call", or in + * other words, to jump into another eBPF program. The same stack + * frame is used (but values on stack and in registers for the + * caller are not accessible to the callee). This mechanism allows + * for program chaining, either for raising the maximum number of + * available eBPF instructions, or to execute given programs in + * conditional blocks. For security reasons, there is an upper + * limit to the number of successive tail calls that can be + * performed. + * + * Upon call of this helper, the program attempts to jump into a + * program referenced at index *index* in *prog_array_map*, a + * special map of type **BPF_MAP_TYPE_PROG_ARRAY**, and passes + * *ctx*, a pointer to the context. + * + * If the call succeeds, the kernel immediately runs the first + * instruction of the new program. This is not a function call, + * and it never returns to the previous program. If the call + * fails, then the helper has no effect, and the caller continues + * to run its subsequent instructions. A call can fail if the + * destination program for the jump does not exist (i.e. 
*index* + * is superior to the number of entries in *prog_array_map*), or + * if the maximum number of tail calls has been reached for this + * chain of programs. This limit is defined in the kernel by the + * macro **MAX_TAIL_CALL_CNT** (not accessible to user space), + * which is currently set to 33. + * + * Returns + * 0 on success, or a negative error in case of failure. + */ +static long (*bpf_tail_call)(void *ctx, void *prog_array_map, __u32 index) = (void *) 12; + +/* + * bpf_clone_redirect + * + * Clone and redirect the packet associated to *skb* to another + * net device of index *ifindex*. Both ingress and egress + * interfaces can be used for redirection. The **BPF_F_INGRESS** + * value in *flags* is used to make the distinction (ingress path + * is selected if the flag is present, egress path otherwise). + * This is the only flag supported for now. + * + * In comparison with **bpf_redirect**\ () helper, + * **bpf_clone_redirect**\ () has the associated cost of + * duplicating the packet buffer, but this can be executed out of + * the eBPF program. Conversely, **bpf_redirect**\ () is more + * efficient, but it is handled through an action code where the + * redirection happens only after the eBPF program has returned. + * + * A call to this helper is susceptible to change the underlying + * packet buffer. Therefore, at load time, all checks on pointers + * previously done by the verifier are invalidated and must be + * performed again, if the helper is used in combination with + * direct packet access. + * + * Returns + * 0 on success, or a negative error in case of failure. + */ +static long (*bpf_clone_redirect)(struct __sk_buff *skb, __u32 ifindex, __u64 flags) = (void *) 13; + +/* + * bpf_get_current_pid_tgid + * + * + * Returns + * A 64-bit integer containing the current tgid and pid, and + * created as such: + * *current_task*\ **->tgid << 32 \|** + * *current_task*\ **->pid**. + */ +static __u64 (*bpf_get_current_pid_tgid)(void) = (void *) 14; + +/* + * bpf_get_current_uid_gid + * + * + * Returns + * A 64-bit integer containing the current GID and UID, and + * created as such: *current_gid* **<< 32 \|** *current_uid*. + */ +static __u64 (*bpf_get_current_uid_gid)(void) = (void *) 15; + +/* + * bpf_get_current_comm + * + * Copy the **comm** attribute of the current task into *buf* of + * *size_of_buf*. The **comm** attribute contains the name of + * the executable (excluding the path) for the current task. The + * *size_of_buf* must be strictly positive. On success, the + * helper makes sure that the *buf* is NUL-terminated. On failure, + * it is filled with zeroes. + * + * Returns + * 0 on success, or a negative error in case of failure. + */ +static long (*bpf_get_current_comm)(void *buf, __u32 size_of_buf) = (void *) 16; + +/* + * bpf_get_cgroup_classid + * + * Retrieve the classid for the current task, i.e. for the net_cls + * cgroup to which *skb* belongs. + * + * This helper can be used on TC egress path, but not on ingress. + * + * The net_cls cgroup provides an interface to tag network packets + * based on a user-provided identifier for all traffic coming from + * the tasks belonging to the related cgroup. See also the related + * kernel documentation, available from the Linux sources in file + * *Documentation/admin-guide/cgroup-v1/net_cls.rst*. + * + * The Linux kernel has two versions for cgroups: there are + * cgroups v1 and cgroups v2. Both are available to users, who can + * use a mixture of them, but note that the net_cls cgroup is for + * cgroup v1 only. 
This makes it incompatible with BPF programs + * run on cgroups, which is a cgroup-v2-only feature (a socket can + * only hold data for one version of cgroups at a time). + * + * This helper is only available is the kernel was compiled with + * the **CONFIG_CGROUP_NET_CLASSID** configuration option set to + * "**y**" or to "**m**". + * + * Returns + * The classid, or 0 for the default unconfigured classid. + */ +static __u32 (*bpf_get_cgroup_classid)(struct __sk_buff *skb) = (void *) 17; + +/* + * bpf_skb_vlan_push + * + * Push a *vlan_tci* (VLAN tag control information) of protocol + * *vlan_proto* to the packet associated to *skb*, then update + * the checksum. Note that if *vlan_proto* is different from + * **ETH_P_8021Q** and **ETH_P_8021AD**, it is considered to + * be **ETH_P_8021Q**. + * + * A call to this helper is susceptible to change the underlying + * packet buffer. Therefore, at load time, all checks on pointers + * previously done by the verifier are invalidated and must be + * performed again, if the helper is used in combination with + * direct packet access. + * + * Returns + * 0 on success, or a negative error in case of failure. + */ +static long (*bpf_skb_vlan_push)(struct __sk_buff *skb, __be16 vlan_proto, __u16 vlan_tci) = (void *) 18; + +/* + * bpf_skb_vlan_pop + * + * Pop a VLAN header from the packet associated to *skb*. + * + * A call to this helper is susceptible to change the underlying + * packet buffer. Therefore, at load time, all checks on pointers + * previously done by the verifier are invalidated and must be + * performed again, if the helper is used in combination with + * direct packet access. + * + * Returns + * 0 on success, or a negative error in case of failure. + */ +static long (*bpf_skb_vlan_pop)(struct __sk_buff *skb) = (void *) 19; + +/* + * bpf_skb_get_tunnel_key + * + * Get tunnel metadata. This helper takes a pointer *key* to an + * empty **struct bpf_tunnel_key** of **size**, that will be + * filled with tunnel metadata for the packet associated to *skb*. + * The *flags* can be set to **BPF_F_TUNINFO_IPV6**, which + * indicates that the tunnel is based on IPv6 protocol instead of + * IPv4. + * + * The **struct bpf_tunnel_key** is an object that generalizes the + * principal parameters used by various tunneling protocols into a + * single struct. This way, it can be used to easily make a + * decision based on the contents of the encapsulation header, + * "summarized" in this struct. In particular, it holds the IP + * address of the remote end (IPv4 or IPv6, depending on the case) + * in *key*\ **->remote_ipv4** or *key*\ **->remote_ipv6**. Also, + * this struct exposes the *key*\ **->tunnel_id**, which is + * generally mapped to a VNI (Virtual Network Identifier), making + * it programmable together with the **bpf_skb_set_tunnel_key**\ + * () helper. 
+ * + * Let's imagine that the following code is part of a program + * attached to the TC ingress interface, on one end of a GRE + * tunnel, and is supposed to filter out all messages coming from + * remote ends with IPv4 address other than 10.0.0.1: + * + * :: + * + * int ret; + * struct bpf_tunnel_key key = {}; + * + * ret = bpf_skb_get_tunnel_key(skb, &key, sizeof(key), 0); + * if (ret < 0) + * return TC_ACT_SHOT; // drop packet + * + * if (key.remote_ipv4 != 0x0a000001) + * return TC_ACT_SHOT; // drop packet + * + * return TC_ACT_OK; // accept packet + * + * This interface can also be used with all encapsulation devices + * that can operate in "collect metadata" mode: instead of having + * one network device per specific configuration, the "collect + * metadata" mode only requires a single device where the + * configuration can be extracted from this helper. + * + * This can be used together with various tunnels such as VXLan, + * Geneve, GRE or IP in IP (IPIP). + * + * Returns + * 0 on success, or a negative error in case of failure. + */ +static long (*bpf_skb_get_tunnel_key)(struct __sk_buff *skb, struct bpf_tunnel_key *key, __u32 size, __u64 flags) = (void *) 20; + +/* + * bpf_skb_set_tunnel_key + * + * Populate tunnel metadata for packet associated to *skb.* The + * tunnel metadata is set to the contents of *key*, of *size*. The + * *flags* can be set to a combination of the following values: + * + * **BPF_F_TUNINFO_IPV6** + * Indicate that the tunnel is based on IPv6 protocol + * instead of IPv4. + * **BPF_F_ZERO_CSUM_TX** + * For IPv4 packets, add a flag to tunnel metadata + * indicating that checksum computation should be skipped + * and checksum set to zeroes. + * **BPF_F_DONT_FRAGMENT** + * Add a flag to tunnel metadata indicating that the + * packet should not be fragmented. + * **BPF_F_SEQ_NUMBER** + * Add a flag to tunnel metadata indicating that a + * sequence number should be added to tunnel header before + * sending the packet. This flag was added for GRE + * encapsulation, but might be used with other protocols + * as well in the future. + * + * Here is a typical usage on the transmit path: + * + * :: + * + * struct bpf_tunnel_key key; + * populate key ... + * bpf_skb_set_tunnel_key(skb, &key, sizeof(key), 0); + * bpf_clone_redirect(skb, vxlan_dev_ifindex, 0); + * + * See also the description of the **bpf_skb_get_tunnel_key**\ () + * helper for additional information. + * + * Returns + * 0 on success, or a negative error in case of failure. + */ +static long (*bpf_skb_set_tunnel_key)(struct __sk_buff *skb, struct bpf_tunnel_key *key, __u32 size, __u64 flags) = (void *) 21; + +/* + * bpf_perf_event_read + * + * Read the value of a perf event counter. This helper relies on a + * *map* of type **BPF_MAP_TYPE_PERF_EVENT_ARRAY**. The nature of + * the perf event counter is selected when *map* is updated with + * perf event file descriptors. The *map* is an array whose size + * is the number of available CPUs, and each cell contains a value + * relative to one CPU. The value to retrieve is indicated by + * *flags*, that contains the index of the CPU to look up, masked + * with **BPF_F_INDEX_MASK**. Alternatively, *flags* can be set to + * **BPF_F_CURRENT_CPU** to indicate that the value for the + * current CPU should be retrieved. + * + * Note that before Linux 4.13, only hardware perf event can be + * retrieved. + * + * Also, be aware that the newer helper + * **bpf_perf_event_read_value**\ () is recommended over + * **bpf_perf_event_read**\ () in general. 
The latter has some ABI + * quirks where error and counter value are used as a return code + * (which is wrong to do since ranges may overlap). This issue is + * fixed with **bpf_perf_event_read_value**\ (), which at the same + * time provides more features over the **bpf_perf_event_read**\ + * () interface. Please refer to the description of + * **bpf_perf_event_read_value**\ () for details. + * + * Returns + * The value of the perf event counter read from the map, or a + * negative error code in case of failure. + */ +static __u64 (*bpf_perf_event_read)(void *map, __u64 flags) = (void *) 22; + +/* + * bpf_redirect + * + * Redirect the packet to another net device of index *ifindex*. + * This helper is somewhat similar to **bpf_clone_redirect**\ + * (), except that the packet is not cloned, which provides + * increased performance. + * + * Except for XDP, both ingress and egress interfaces can be used + * for redirection. The **BPF_F_INGRESS** value in *flags* is used + * to make the distinction (ingress path is selected if the flag + * is present, egress path otherwise). Currently, XDP only + * supports redirection to the egress interface, and accepts no + * flag at all. + * + * The same effect can also be attained with the more generic + * **bpf_redirect_map**\ (), which uses a BPF map to store the + * redirect target instead of providing it directly to the helper. + * + * Returns + * For XDP, the helper returns **XDP_REDIRECT** on success or + * **XDP_ABORTED** on error. For other program types, the values + * are **TC_ACT_REDIRECT** on success or **TC_ACT_SHOT** on + * error. + */ +static long (*bpf_redirect)(__u32 ifindex, __u64 flags) = (void *) 23; + +/* + * bpf_get_route_realm + * + * Retrieve the realm or the route, that is to say the + * **tclassid** field of the destination for the *skb*. The + * identifier retrieved is a user-provided tag, similar to the + * one used with the net_cls cgroup (see description for + * **bpf_get_cgroup_classid**\ () helper), but here this tag is + * held by a route (a destination entry), not by a task. + * + * Retrieving this identifier works with the clsact TC egress hook + * (see also **tc-bpf(8)**), or alternatively on conventional + * classful egress qdiscs, but not on TC ingress path. In case of + * clsact TC egress hook, this has the advantage that, internally, + * the destination entry has not been dropped yet in the transmit + * path. Therefore, the destination entry does not need to be + * artificially held via **netif_keep_dst**\ () for a classful + * qdisc until the *skb* is freed. + * + * This helper is available only if the kernel was compiled with + * **CONFIG_IP_ROUTE_CLASSID** configuration option. + * + * Returns + * The realm of the route for the packet associated to *skb*, or 0 + * if none was found. + */ +static __u32 (*bpf_get_route_realm)(struct __sk_buff *skb) = (void *) 24; + +/* + * bpf_perf_event_output + * + * Write raw *data* blob into a special BPF perf event held by + * *map* of type **BPF_MAP_TYPE_PERF_EVENT_ARRAY**. This perf + * event must have the following attributes: **PERF_SAMPLE_RAW** + * as **sample_type**, **PERF_TYPE_SOFTWARE** as **type**, and + * **PERF_COUNT_SW_BPF_OUTPUT** as **config**. + * + * The *flags* are used to indicate the index in *map* for which + * the value must be put, masked with **BPF_F_INDEX_MASK**. + * Alternatively, *flags* can be set to **BPF_F_CURRENT_CPU** + * to indicate that the index of the current CPU core should be + * used. 
+ * + * The value to write, of *size*, is passed through eBPF stack and + * pointed by *data*. + * + * The context of the program *ctx* needs also be passed to the + * helper. + * + * On user space, a program willing to read the values needs to + * call **perf_event_open**\ () on the perf event (either for + * one or for all CPUs) and to store the file descriptor into the + * *map*. This must be done before the eBPF program can send data + * into it. An example is available in file + * *samples/bpf/trace_output_user.c* in the Linux kernel source + * tree (the eBPF program counterpart is in + * *samples/bpf/trace_output_kern.c*). + * + * **bpf_perf_event_output**\ () achieves better performance + * than **bpf_trace_printk**\ () for sharing data with user + * space, and is much better suitable for streaming data from eBPF + * programs. + * + * Note that this helper is not restricted to tracing use cases + * and can be used with programs attached to TC or XDP as well, + * where it allows for passing data to user space listeners. Data + * can be: + * + * * Only custom structs, + * * Only the packet payload, or + * * A combination of both. + * + * Returns + * 0 on success, or a negative error in case of failure. + */ +static long (*bpf_perf_event_output)(void *ctx, void *map, __u64 flags, void *data, __u64 size) = (void *) 25; + +/* + * bpf_skb_load_bytes + * + * This helper was provided as an easy way to load data from a + * packet. It can be used to load *len* bytes from *offset* from + * the packet associated to *skb*, into the buffer pointed by + * *to*. + * + * Since Linux 4.7, usage of this helper has mostly been replaced + * by "direct packet access", enabling packet data to be + * manipulated with *skb*\ **->data** and *skb*\ **->data_end** + * pointing respectively to the first byte of packet data and to + * the byte after the last byte of packet data. However, it + * remains useful if one wishes to read large quantities of data + * at once from a packet into the eBPF stack. + * + * Returns + * 0 on success, or a negative error in case of failure. + */ +static long (*bpf_skb_load_bytes)(const void *skb, __u32 offset, void *to, __u32 len) = (void *) 26; + +/* + * bpf_get_stackid + * + * Walk a user or a kernel stack and return its id. To achieve + * this, the helper needs *ctx*, which is a pointer to the context + * on which the tracing program is executed, and a pointer to a + * *map* of type **BPF_MAP_TYPE_STACK_TRACE**. + * + * The last argument, *flags*, holds the number of stack frames to + * skip (from 0 to 255), masked with + * **BPF_F_SKIP_FIELD_MASK**. The next bits can be used to set + * a combination of the following flags: + * + * **BPF_F_USER_STACK** + * Collect a user space stack instead of a kernel stack. + * **BPF_F_FAST_STACK_CMP** + * Compare stacks by hash only. + * **BPF_F_REUSE_STACKID** + * If two different stacks hash into the same *stackid*, + * discard the old one. + * + * The stack id retrieved is a 32 bit long integer handle which + * can be further combined with other data (including other stack + * ids) and used as a key into maps. This can be useful for + * generating a variety of graphs (such as flame graphs or off-cpu + * graphs). + * + * For walking a stack, this helper is an improvement over + * **bpf_probe_read**\ (), which can be used with unrolled loops + * but is not efficient and consumes a lot of eBPF instructions. + * Instead, **bpf_get_stackid**\ () can collect up to + * **PERF_MAX_STACK_DEPTH** both kernel and user frames. 
Note that + * this limit can be controlled with the **sysctl** program, and + * that it should be manually increased in order to profile long + * user stacks (such as stacks for Java programs). To do so, use: + * + * :: + * + * # sysctl kernel.perf_event_max_stack= + * + * Returns + * The positive or null stack id on success, or a negative error + * in case of failure. + */ +static long (*bpf_get_stackid)(void *ctx, void *map, __u64 flags) = (void *) 27; + +/* + * bpf_csum_diff + * + * Compute a checksum difference, from the raw buffer pointed by + * *from*, of length *from_size* (that must be a multiple of 4), + * towards the raw buffer pointed by *to*, of size *to_size* + * (same remark). An optional *seed* can be added to the value + * (this can be cascaded, the seed may come from a previous call + * to the helper). + * + * This is flexible enough to be used in several ways: + * + * * With *from_size* == 0, *to_size* > 0 and *seed* set to + * checksum, it can be used when pushing new data. + * * With *from_size* > 0, *to_size* == 0 and *seed* set to + * checksum, it can be used when removing data from a packet. + * * With *from_size* > 0, *to_size* > 0 and *seed* set to 0, it + * can be used to compute a diff. Note that *from_size* and + * *to_size* do not need to be equal. + * + * This helper can be used in combination with + * **bpf_l3_csum_replace**\ () and **bpf_l4_csum_replace**\ (), to + * which one can feed in the difference computed with + * **bpf_csum_diff**\ (). + * + * Returns + * The checksum result, or a negative error code in case of + * failure. + */ +static __s64 (*bpf_csum_diff)(__be32 *from, __u32 from_size, __be32 *to, __u32 to_size, __wsum seed) = (void *) 28; + +/* + * bpf_skb_get_tunnel_opt + * + * Retrieve tunnel options metadata for the packet associated to + * *skb*, and store the raw tunnel option data to the buffer *opt* + * of *size*. + * + * This helper can be used with encapsulation devices that can + * operate in "collect metadata" mode (please refer to the related + * note in the description of **bpf_skb_get_tunnel_key**\ () for + * more details). A particular example where this can be used is + * in combination with the Geneve encapsulation protocol, where it + * allows for pushing (with **bpf_skb_get_tunnel_opt**\ () helper) + * and retrieving arbitrary TLVs (Type-Length-Value headers) from + * the eBPF program. This allows for full customization of these + * headers. + * + * Returns + * The size of the option data retrieved. + */ +static long (*bpf_skb_get_tunnel_opt)(struct __sk_buff *skb, void *opt, __u32 size) = (void *) 29; + +/* + * bpf_skb_set_tunnel_opt + * + * Set tunnel options metadata for the packet associated to *skb* + * to the option data contained in the raw buffer *opt* of *size*. + * + * See also the description of the **bpf_skb_get_tunnel_opt**\ () + * helper for additional information. + * + * Returns + * 0 on success, or a negative error in case of failure. + */ +static long (*bpf_skb_set_tunnel_opt)(struct __sk_buff *skb, void *opt, __u32 size) = (void *) 30; + +/* + * bpf_skb_change_proto + * + * Change the protocol of the *skb* to *proto*. Currently + * supported are transition from IPv4 to IPv6, and from IPv6 to + * IPv4. The helper takes care of the groundwork for the + * transition, including resizing the socket buffer. The eBPF + * program is expected to fill the new headers, if any, via + * **skb_store_bytes**\ () and to recompute the checksums with + * **bpf_l3_csum_replace**\ () and **bpf_l4_csum_replace**\ + * (). 
The main case for this helper is to perform NAT64 + * operations out of an eBPF program. + * + * Internally, the GSO type is marked as dodgy so that headers are + * checked and segments are recalculated by the GSO/GRO engine. + * The size for GSO target is adapted as well. + * + * All values for *flags* are reserved for future usage, and must + * be left at zero. + * + * A call to this helper is susceptible to change the underlying + * packet buffer. Therefore, at load time, all checks on pointers + * previously done by the verifier are invalidated and must be + * performed again, if the helper is used in combination with + * direct packet access. + * + * Returns + * 0 on success, or a negative error in case of failure. + */ +static long (*bpf_skb_change_proto)(struct __sk_buff *skb, __be16 proto, __u64 flags) = (void *) 31; + +/* + * bpf_skb_change_type + * + * Change the packet type for the packet associated to *skb*. This + * comes down to setting *skb*\ **->pkt_type** to *type*, except + * the eBPF program does not have a write access to *skb*\ + * **->pkt_type** beside this helper. Using a helper here allows + * for graceful handling of errors. + * + * The major use case is to change incoming *skb*s to + * **PACKET_HOST** in a programmatic way instead of having to + * recirculate via **redirect**\ (..., **BPF_F_INGRESS**), for + * example. + * + * Note that *type* only allows certain values. At this time, they + * are: + * + * **PACKET_HOST** + * Packet is for us. + * **PACKET_BROADCAST** + * Send packet to all. + * **PACKET_MULTICAST** + * Send packet to group. + * **PACKET_OTHERHOST** + * Send packet to someone else. + * + * Returns + * 0 on success, or a negative error in case of failure. + */ +static long (*bpf_skb_change_type)(struct __sk_buff *skb, __u32 type) = (void *) 32; + +/* + * bpf_skb_under_cgroup + * + * Check whether *skb* is a descendant of the cgroup2 held by + * *map* of type **BPF_MAP_TYPE_CGROUP_ARRAY**, at *index*. + * + * Returns + * The return value depends on the result of the test, and can be: + * + * * 0, if the *skb* failed the cgroup2 descendant test. + * * 1, if the *skb* succeeded the cgroup2 descendant test. + * * A negative error code, if an error occurred. + */ +static long (*bpf_skb_under_cgroup)(struct __sk_buff *skb, void *map, __u32 index) = (void *) 33; + +/* + * bpf_get_hash_recalc + * + * Retrieve the hash of the packet, *skb*\ **->hash**. If it is + * not set, in particular if the hash was cleared due to mangling, + * recompute this hash. Later accesses to the hash can be done + * directly with *skb*\ **->hash**. + * + * Calling **bpf_set_hash_invalid**\ (), changing a packet + * prototype with **bpf_skb_change_proto**\ (), or calling + * **bpf_skb_store_bytes**\ () with the + * **BPF_F_INVALIDATE_HASH** are actions susceptible to clear + * the hash and to trigger a new computation for the next call to + * **bpf_get_hash_recalc**\ (). + * + * Returns + * The 32-bit hash. + */ +static __u32 (*bpf_get_hash_recalc)(struct __sk_buff *skb) = (void *) 34; + +/* + * bpf_get_current_task + * + * + * Returns + * A pointer to the current task struct. + */ +static __u64 (*bpf_get_current_task)(void) = (void *) 35; + +/* + * bpf_probe_write_user + * + * Attempt in a safe way to write *len* bytes from the buffer + * *src* to *dst* in memory. It only works for threads that are in + * user context, and *dst* must be a valid user space address. 
+ * + * This helper should not be used to implement any kind of + * security mechanism because of TOC-TOU attacks, but rather to + * debug, divert, and manipulate execution of semi-cooperative + * processes. + * + * Keep in mind that this feature is meant for experiments, and it + * has a risk of crashing the system and running programs. + * Therefore, when an eBPF program using this helper is attached, + * a warning including PID and process name is printed to kernel + * logs. + * + * Returns + * 0 on success, or a negative error in case of failure. + */ +static long (*bpf_probe_write_user)(void *dst, const void *src, __u32 len) = (void *) 36; + +/* + * bpf_current_task_under_cgroup + * + * Check whether the probe is being run is the context of a given + * subset of the cgroup2 hierarchy. The cgroup2 to test is held by + * *map* of type **BPF_MAP_TYPE_CGROUP_ARRAY**, at *index*. + * + * Returns + * The return value depends on the result of the test, and can be: + * + * * 0, if current task belongs to the cgroup2. + * * 1, if current task does not belong to the cgroup2. + * * A negative error code, if an error occurred. + */ +static long (*bpf_current_task_under_cgroup)(void *map, __u32 index) = (void *) 37; + +/* + * bpf_skb_change_tail + * + * Resize (trim or grow) the packet associated to *skb* to the + * new *len*. The *flags* are reserved for future usage, and must + * be left at zero. + * + * The basic idea is that the helper performs the needed work to + * change the size of the packet, then the eBPF program rewrites + * the rest via helpers like **bpf_skb_store_bytes**\ (), + * **bpf_l3_csum_replace**\ (), **bpf_l3_csum_replace**\ () + * and others. This helper is a slow path utility intended for + * replies with control messages. And because it is targeted for + * slow path, the helper itself can afford to be slow: it + * implicitly linearizes, unclones and drops offloads from the + * *skb*. + * + * A call to this helper is susceptible to change the underlying + * packet buffer. Therefore, at load time, all checks on pointers + * previously done by the verifier are invalidated and must be + * performed again, if the helper is used in combination with + * direct packet access. + * + * Returns + * 0 on success, or a negative error in case of failure. + */ +static long (*bpf_skb_change_tail)(struct __sk_buff *skb, __u32 len, __u64 flags) = (void *) 38; + +/* + * bpf_skb_pull_data + * + * Pull in non-linear data in case the *skb* is non-linear and not + * all of *len* are part of the linear section. Make *len* bytes + * from *skb* readable and writable. If a zero value is passed for + * *len*, then the whole length of the *skb* is pulled. + * + * This helper is only needed for reading and writing with direct + * packet access. + * + * For direct packet access, testing that offsets to access + * are within packet boundaries (test on *skb*\ **->data_end**) is + * susceptible to fail if offsets are invalid, or if the requested + * data is in non-linear parts of the *skb*. On failure the + * program can just bail out, or in the case of a non-linear + * buffer, use a helper to make the data available. The + * **bpf_skb_load_bytes**\ () helper is a first solution to access + * the data. Another one consists in using **bpf_skb_pull_data** + * to pull in once the non-linear parts, then retesting and + * eventually access the data. + * + * At the same time, this also makes sure the *skb* is uncloned, + * which is a necessary condition for direct write. 
As this needs + * to be an invariant for the write part only, the verifier + * detects writes and adds a prologue that is calling + * **bpf_skb_pull_data()** to effectively unclone the *skb* from + * the very beginning in case it is indeed cloned. + * + * A call to this helper is susceptible to change the underlying + * packet buffer. Therefore, at load time, all checks on pointers + * previously done by the verifier are invalidated and must be + * performed again, if the helper is used in combination with + * direct packet access. + * + * Returns + * 0 on success, or a negative error in case of failure. + */ +static long (*bpf_skb_pull_data)(struct __sk_buff *skb, __u32 len) = (void *) 39; + +/* + * bpf_csum_update + * + * Add the checksum *csum* into *skb*\ **->csum** in case the + * driver has supplied a checksum for the entire packet into that + * field. Return an error otherwise. This helper is intended to be + * used in combination with **bpf_csum_diff**\ (), in particular + * when the checksum needs to be updated after data has been + * written into the packet through direct packet access. + * + * Returns + * The checksum on success, or a negative error code in case of + * failure. + */ +static __s64 (*bpf_csum_update)(struct __sk_buff *skb, __wsum csum) = (void *) 40; + +/* + * bpf_set_hash_invalid + * + * Invalidate the current *skb*\ **->hash**. It can be used after + * mangling on headers through direct packet access, in order to + * indicate that the hash is outdated and to trigger a + * recalculation the next time the kernel tries to access this + * hash or when the **bpf_get_hash_recalc**\ () helper is called. + * + */ +static void (*bpf_set_hash_invalid)(struct __sk_buff *skb) = (void *) 41; + +/* + * bpf_get_numa_node_id + * + * Return the id of the current NUMA node. The primary use case + * for this helper is the selection of sockets for the local NUMA + * node, when the program is attached to sockets using the + * **SO_ATTACH_REUSEPORT_EBPF** option (see also **socket(7)**), + * but the helper is also available to other eBPF program types, + * similarly to **bpf_get_smp_processor_id**\ (). + * + * Returns + * The id of current NUMA node. + */ +static long (*bpf_get_numa_node_id)(void) = (void *) 42; + +/* + * bpf_skb_change_head + * + * Grows headroom of packet associated to *skb* and adjusts the + * offset of the MAC header accordingly, adding *len* bytes of + * space. It automatically extends and reallocates memory as + * required. + * + * This helper can be used on a layer 3 *skb* to push a MAC header + * for redirection into a layer 2 device. + * + * All values for *flags* are reserved for future usage, and must + * be left at zero. + * + * A call to this helper is susceptible to change the underlying + * packet buffer. Therefore, at load time, all checks on pointers + * previously done by the verifier are invalidated and must be + * performed again, if the helper is used in combination with + * direct packet access. + * + * Returns + * 0 on success, or a negative error in case of failure. + */ +static long (*bpf_skb_change_head)(struct __sk_buff *skb, __u32 len, __u64 flags) = (void *) 43; + +/* + * bpf_xdp_adjust_head + * + * Adjust (move) *xdp_md*\ **->data** by *delta* bytes. Note that + * it is possible to use a negative value for *delta*. This helper + * can be used to prepare the packet for pushing or popping + * headers. + * + * A call to this helper is susceptible to change the underlying + * packet buffer. 
Therefore, at load time, all checks on pointers + * previously done by the verifier are invalidated and must be + * performed again, if the helper is used in combination with + * direct packet access. + * + * Returns + * 0 on success, or a negative error in case of failure. + */ +static long (*bpf_xdp_adjust_head)(struct xdp_md *xdp_md, int delta) = (void *) 44; + +/* + * bpf_probe_read_str + * + * Copy a NUL terminated string from an unsafe kernel address + * *unsafe_ptr* to *dst*. See **bpf_probe_read_kernel_str**\ () for + * more details. + * + * Generally, use **bpf_probe_read_user_str**\ () or + * **bpf_probe_read_kernel_str**\ () instead. + * + * Returns + * On success, the strictly positive length of the string, + * including the trailing NUL character. On error, a negative + * value. + */ +static long (*bpf_probe_read_str)(void *dst, __u32 size, const void *unsafe_ptr) = (void *) 45; + +/* + * bpf_get_socket_cookie + * + * If the **struct sk_buff** pointed by *skb* has a known socket, + * retrieve the cookie (generated by the kernel) of this socket. + * If no cookie has been set yet, generate a new cookie. Once + * generated, the socket cookie remains stable for the life of the + * socket. This helper can be useful for monitoring per socket + * networking traffic statistics as it provides a global socket + * identifier that can be assumed unique. + * + * Returns + * A 8-byte long unique number on success, or 0 if the socket + * field is missing inside *skb*. + */ +static __u64 (*bpf_get_socket_cookie)(void *ctx) = (void *) 46; + +/* + * bpf_get_socket_uid + * + * + * Returns + * The owner UID of the socket associated to *skb*. If the socket + * is **NULL**, or if it is not a full socket (i.e. if it is a + * time-wait or a request socket instead), **overflowuid** value + * is returned (note that **overflowuid** might also be the actual + * UID value for the socket). + */ +static __u32 (*bpf_get_socket_uid)(struct __sk_buff *skb) = (void *) 47; + +/* + * bpf_set_hash + * + * Set the full hash for *skb* (set the field *skb*\ **->hash**) + * to value *hash*. + * + * Returns + * 0 + */ +static long (*bpf_set_hash)(struct __sk_buff *skb, __u32 hash) = (void *) 48; + +/* + * bpf_setsockopt + * + * Emulate a call to **setsockopt()** on the socket associated to + * *bpf_socket*, which must be a full socket. The *level* at + * which the option resides and the name *optname* of the option + * must be specified, see **setsockopt(2)** for more information. + * The option value of length *optlen* is pointed by *optval*. + * + * *bpf_socket* should be one of the following: + * + * * **struct bpf_sock_ops** for **BPF_PROG_TYPE_SOCK_OPS**. + * * **struct bpf_sock_addr** for **BPF_CGROUP_INET4_CONNECT** + * and **BPF_CGROUP_INET6_CONNECT**. + * + * This helper actually implements a subset of **setsockopt()**. + * It supports the following *level*\ s: + * + * * **SOL_SOCKET**, which supports the following *optname*\ s: + * **SO_RCVBUF**, **SO_SNDBUF**, **SO_MAX_PACING_RATE**, + * **SO_PRIORITY**, **SO_RCVLOWAT**, **SO_MARK**, + * **SO_BINDTODEVICE**, **SO_KEEPALIVE**. + * * **IPPROTO_TCP**, which supports the following *optname*\ s: + * **TCP_CONGESTION**, **TCP_BPF_IW**, + * **TCP_BPF_SNDCWND_CLAMP**, **TCP_SAVE_SYN**, + * **TCP_KEEPIDLE**, **TCP_KEEPINTVL**, **TCP_KEEPCNT**, + * **TCP_SYNCNT**, **TCP_USER_TIMEOUT**, **TCP_NOTSENT_LOWAT**. + * * **IPPROTO_IP**, which supports *optname* **IP_TOS**. + * * **IPPROTO_IPV6**, which supports *optname* **IPV6_TCLASS**. 
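+ *
+ * For example, a **BPF_PROG_TYPE_SOCK_OPS** program could switch an
+ * established connection to a different congestion control algorithm
+ * (a minimal sketch; *skops* is the program's context argument):
+ *
+ * ::
+ *
+ *      char cc[] = "cubic";
+ *
+ *      bpf_setsockopt(skops, IPPROTO_TCP, TCP_CONGESTION, cc, sizeof(cc));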
+ * + * Returns + * 0 on success, or a negative error in case of failure. + */ +static long (*bpf_setsockopt)(void *bpf_socket, int level, int optname, void *optval, int optlen) = (void *) 49; + +/* + * bpf_skb_adjust_room + * + * Grow or shrink the room for data in the packet associated to + * *skb* by *len_diff*, and according to the selected *mode*. + * + * By default, the helper will reset any offloaded checksum + * indicator of the skb to CHECKSUM_NONE. This can be avoided + * by the following flag: + * + * * **BPF_F_ADJ_ROOM_NO_CSUM_RESET**: Do not reset offloaded + * checksum data of the skb to CHECKSUM_NONE. + * + * There are two supported modes at this time: + * + * * **BPF_ADJ_ROOM_MAC**: Adjust room at the mac layer + * (room space is added or removed below the layer 2 header). + * + * * **BPF_ADJ_ROOM_NET**: Adjust room at the network layer + * (room space is added or removed below the layer 3 header). + * + * The following flags are supported at this time: + * + * * **BPF_F_ADJ_ROOM_FIXED_GSO**: Do not adjust gso_size. + * Adjusting mss in this way is not allowed for datagrams. + * + * * **BPF_F_ADJ_ROOM_ENCAP_L3_IPV4**, + * **BPF_F_ADJ_ROOM_ENCAP_L3_IPV6**: + * Any new space is reserved to hold a tunnel header. + * Configure skb offsets and other fields accordingly. + * + * * **BPF_F_ADJ_ROOM_ENCAP_L4_GRE**, + * **BPF_F_ADJ_ROOM_ENCAP_L4_UDP**: + * Use with ENCAP_L3 flags to further specify the tunnel type. + * + * * **BPF_F_ADJ_ROOM_ENCAP_L2**\ (*len*): + * Use with ENCAP_L3/L4 flags to further specify the tunnel + * type; *len* is the length of the inner MAC header. + * + * * **BPF_F_ADJ_ROOM_ENCAP_L2_ETH**: + * Use with BPF_F_ADJ_ROOM_ENCAP_L2 flag to further specify the + * L2 type as Ethernet. + * + * A call to this helper is susceptible to change the underlying + * packet buffer. Therefore, at load time, all checks on pointers + * previously done by the verifier are invalidated and must be + * performed again, if the helper is used in combination with + * direct packet access. + * + * Returns + * 0 on success, or a negative error in case of failure. + */ +static long (*bpf_skb_adjust_room)(struct __sk_buff *skb, __s32 len_diff, __u32 mode, __u64 flags) = (void *) 50; + +/* + * bpf_redirect_map + * + * Redirect the packet to the endpoint referenced by *map* at + * index *key*. Depending on its type, this *map* can contain + * references to net devices (for forwarding packets through other + * ports), or to CPUs (for redirecting XDP frames to another CPU; + * but this is only implemented for native XDP (with driver + * support) as of this writing). + * + * The lower two bits of *flags* are used as the return code if + * the map lookup fails. This is so that the return value can be + * one of the XDP program return codes up to **XDP_TX**, as chosen + * by the caller. The higher bits of *flags* can be set to + * BPF_F_BROADCAST or BPF_F_EXCLUDE_INGRESS as defined below. + * + * With BPF_F_BROADCAST the packet will be broadcasted to all the + * interfaces in the map, with BPF_F_EXCLUDE_INGRESS the ingress + * interface will be excluded when do broadcasting. + * + * See also **bpf_redirect**\ (), which only supports redirecting + * to an ifindex, but doesn't require a map to do so. + * + * Returns + * **XDP_REDIRECT** on success, or the value of the two lower bits + * of the *flags* argument on error. 
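+ *
+ * A minimal sketch of the usual XDP pattern, assuming *tx_ports* is a
+ * **BPF_MAP_TYPE_DEVMAP** populated from user space and keyed by
+ * ifindex; passing **XDP_PASS** in the low bits of *flags* makes a
+ * failed lookup fall back to the stack instead of dropping the frame:
+ *
+ * ::
+ *
+ *      return bpf_redirect_map(&tx_ports, ctx->ingress_ifindex, XDP_PASS);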
+ */ +static long (*bpf_redirect_map)(void *map, __u32 key, __u64 flags) = (void *) 51; + +/* + * bpf_sk_redirect_map + * + * Redirect the packet to the socket referenced by *map* (of type + * **BPF_MAP_TYPE_SOCKMAP**) at index *key*. Both ingress and + * egress interfaces can be used for redirection. The + * **BPF_F_INGRESS** value in *flags* is used to make the + * distinction (ingress path is selected if the flag is present, + * egress path otherwise). This is the only flag supported for now. + * + * Returns + * **SK_PASS** on success, or **SK_DROP** on error. + */ +static long (*bpf_sk_redirect_map)(struct __sk_buff *skb, void *map, __u32 key, __u64 flags) = (void *) 52; + +/* + * bpf_sock_map_update + * + * Add an entry to, or update a *map* referencing sockets. The + * *skops* is used as a new value for the entry associated to + * *key*. *flags* is one of: + * + * **BPF_NOEXIST** + * The entry for *key* must not exist in the map. + * **BPF_EXIST** + * The entry for *key* must already exist in the map. + * **BPF_ANY** + * No condition on the existence of the entry for *key*. + * + * If the *map* has eBPF programs (parser and verdict), those will + * be inherited by the socket being added. If the socket is + * already attached to eBPF programs, this results in an error. + * + * Returns + * 0 on success, or a negative error in case of failure. + */ +static long (*bpf_sock_map_update)(struct bpf_sock_ops *skops, void *map, void *key, __u64 flags) = (void *) 53; + +/* + * bpf_xdp_adjust_meta + * + * Adjust the address pointed by *xdp_md*\ **->data_meta** by + * *delta* (which can be positive or negative). Note that this + * operation modifies the address stored in *xdp_md*\ **->data**, + * so the latter must be loaded only after the helper has been + * called. + * + * The use of *xdp_md*\ **->data_meta** is optional and programs + * are not required to use it. The rationale is that when the + * packet is processed with XDP (e.g. as DoS filter), it is + * possible to push further meta data along with it before passing + * to the stack, and to give the guarantee that an ingress eBPF + * program attached as a TC classifier on the same device can pick + * this up for further post-processing. Since TC works with socket + * buffers, it remains possible to set from XDP the **mark** or + * **priority** pointers, or other pointers for the socket buffer. + * Having this scratch space generic and programmable allows for + * more flexibility as the user is free to store whatever meta + * data they need. + * + * A call to this helper is susceptible to change the underlying + * packet buffer. Therefore, at load time, all checks on pointers + * previously done by the verifier are invalidated and must be + * performed again, if the helper is used in combination with + * direct packet access. + * + * Returns + * 0 on success, or a negative error in case of failure. + */ +static long (*bpf_xdp_adjust_meta)(struct xdp_md *xdp_md, int delta) = (void *) 54; + +/* + * bpf_perf_event_read_value + * + * Read the value of a perf event counter, and store it into *buf* + * of size *buf_size*. This helper relies on a *map* of type + * **BPF_MAP_TYPE_PERF_EVENT_ARRAY**. The nature of the perf event + * counter is selected when *map* is updated with perf event file + * descriptors. The *map* is an array whose size is the number of + * available CPUs, and each cell contains a value relative to one + * CPU. 
The value to retrieve is indicated by *flags*, that + * contains the index of the CPU to look up, masked with + * **BPF_F_INDEX_MASK**. Alternatively, *flags* can be set to + * **BPF_F_CURRENT_CPU** to indicate that the value for the + * current CPU should be retrieved. + * + * This helper behaves in a way close to + * **bpf_perf_event_read**\ () helper, save that instead of + * just returning the value observed, it fills the *buf* + * structure. This allows for additional data to be retrieved: in + * particular, the enabled and running times (in *buf*\ + * **->enabled** and *buf*\ **->running**, respectively) are + * copied. In general, **bpf_perf_event_read_value**\ () is + * recommended over **bpf_perf_event_read**\ (), which has some + * ABI issues and provides fewer functionalities. + * + * These values are interesting, because hardware PMU (Performance + * Monitoring Unit) counters are limited resources. When there are + * more PMU based perf events opened than available counters, + * kernel will multiplex these events so each event gets certain + * percentage (but not all) of the PMU time. In case that + * multiplexing happens, the number of samples or counter value + * will not reflect the case compared to when no multiplexing + * occurs. This makes comparison between different runs difficult. + * Typically, the counter value should be normalized before + * comparing to other experiments. The usual normalization is done + * as follows. + * + * :: + * + * normalized_counter = counter * t_enabled / t_running + * + * Where t_enabled is the time enabled for event and t_running is + * the time running for event since last normalization. The + * enabled and running times are accumulated since the perf event + * open. To achieve scaling factor between two invocations of an + * eBPF program, users can use CPU id as the key (which is + * typical for perf array usage model) to remember the previous + * value and do the calculation inside the eBPF program. + * + * Returns + * 0 on success, or a negative error in case of failure. + */ +static long (*bpf_perf_event_read_value)(void *map, __u64 flags, struct bpf_perf_event_value *buf, __u32 buf_size) = (void *) 55; + +/* + * bpf_perf_prog_read_value + * + * For en eBPF program attached to a perf event, retrieve the + * value of the event counter associated to *ctx* and store it in + * the structure pointed by *buf* and of size *buf_size*. Enabled + * and running times are also stored in the structure (see + * description of helper **bpf_perf_event_read_value**\ () for + * more details). + * + * Returns + * 0 on success, or a negative error in case of failure. + */ +static long (*bpf_perf_prog_read_value)(struct bpf_perf_event_data *ctx, struct bpf_perf_event_value *buf, __u32 buf_size) = (void *) 56; + +/* + * bpf_getsockopt + * + * Emulate a call to **getsockopt()** on the socket associated to + * *bpf_socket*, which must be a full socket. The *level* at + * which the option resides and the name *optname* of the option + * must be specified, see **getsockopt(2)** for more information. + * The retrieved value is stored in the structure pointed by + * *opval* and of length *optlen*. + * + * *bpf_socket* should be one of the following: + * + * * **struct bpf_sock_ops** for **BPF_PROG_TYPE_SOCK_OPS**. + * * **struct bpf_sock_addr** for **BPF_CGROUP_INET4_CONNECT** + * and **BPF_CGROUP_INET6_CONNECT**. + * + * This helper actually implements a subset of **getsockopt()**. 
+ * It supports the following *level*\ s: + * + * * **IPPROTO_TCP**, which supports *optname* + * **TCP_CONGESTION**. + * * **IPPROTO_IP**, which supports *optname* **IP_TOS**. + * * **IPPROTO_IPV6**, which supports *optname* **IPV6_TCLASS**. + * + * Returns + * 0 on success, or a negative error in case of failure. + */ +static long (*bpf_getsockopt)(void *bpf_socket, int level, int optname, void *optval, int optlen) = (void *) 57; + +/* + * bpf_override_return + * + * Used for error injection, this helper uses kprobes to override + * the return value of the probed function, and to set it to *rc*. + * The first argument is the context *regs* on which the kprobe + * works. + * + * This helper works by setting the PC (program counter) + * to an override function which is run in place of the original + * probed function. This means the probed function is not run at + * all. The replacement function just returns with the required + * value. + * + * This helper has security implications, and thus is subject to + * restrictions. It is only available if the kernel was compiled + * with the **CONFIG_BPF_KPROBE_OVERRIDE** configuration + * option, and in this case it only works on functions tagged with + * **ALLOW_ERROR_INJECTION** in the kernel code. + * + * Also, the helper is only available for the architectures having + * the CONFIG_FUNCTION_ERROR_INJECTION option. As of this writing, + * x86 architecture is the only one to support this feature. + * + * Returns + * 0 + */ +static long (*bpf_override_return)(struct pt_regs *regs, __u64 rc) = (void *) 58; + +/* + * bpf_sock_ops_cb_flags_set + * + * Attempt to set the value of the **bpf_sock_ops_cb_flags** field + * for the full TCP socket associated to *bpf_sock_ops* to + * *argval*. + * + * The primary use of this field is to determine if there should + * be calls to eBPF programs of type + * **BPF_PROG_TYPE_SOCK_OPS** at various points in the TCP + * code. A program of the same type can change its value, per + * connection and as necessary, when the connection is + * established. This field is directly accessible for reading, but + * this helper must be used for updates in order to return an + * error if an eBPF program tries to set a callback that is not + * supported in the current kernel. + * + * *argval* is a flag array which can combine these flags: + * + * * **BPF_SOCK_OPS_RTO_CB_FLAG** (retransmission time out) + * * **BPF_SOCK_OPS_RETRANS_CB_FLAG** (retransmission) + * * **BPF_SOCK_OPS_STATE_CB_FLAG** (TCP state change) + * * **BPF_SOCK_OPS_RTT_CB_FLAG** (every RTT) + * + * Therefore, this function can be used to clear a callback flag by + * setting the appropriate bit to zero. e.g. to disable the RTO + * callback: + * + * **bpf_sock_ops_cb_flags_set(bpf_sock,** + * **bpf_sock->bpf_sock_ops_cb_flags & ~BPF_SOCK_OPS_RTO_CB_FLAG)** + * + * Here are some examples of where one could call such eBPF + * program: + * + * * When RTO fires. + * * When a packet is retransmitted. + * * When the connection terminates. + * * When a packet is sent. + * * When a packet is received. + * + * Returns + * Code **-EINVAL** if the socket is not a full TCP socket; + * otherwise, a positive number containing the bits that could not + * be set is returned (which comes down to 0 if all bits were set + * as required). + */ +static long (*bpf_sock_ops_cb_flags_set)(struct bpf_sock_ops *bpf_sock, int argval) = (void *) 59; + +/* + * bpf_msg_redirect_map + * + * This helper is used in programs implementing policies at the + * socket level. 
If the message *msg* is allowed to pass (i.e. if + * the verdict eBPF program returns **SK_PASS**), redirect it to + * the socket referenced by *map* (of type + * **BPF_MAP_TYPE_SOCKMAP**) at index *key*. Both ingress and + * egress interfaces can be used for redirection. The + * **BPF_F_INGRESS** value in *flags* is used to make the + * distinction (ingress path is selected if the flag is present, + * egress path otherwise). This is the only flag supported for now. + * + * Returns + * **SK_PASS** on success, or **SK_DROP** on error. + */ +static long (*bpf_msg_redirect_map)(struct sk_msg_md *msg, void *map, __u32 key, __u64 flags) = (void *) 60; + +/* + * bpf_msg_apply_bytes + * + * For socket policies, apply the verdict of the eBPF program to + * the next *bytes* (number of bytes) of message *msg*. + * + * For example, this helper can be used in the following cases: + * + * * A single **sendmsg**\ () or **sendfile**\ () system call + * contains multiple logical messages that the eBPF program is + * supposed to read and for which it should apply a verdict. + * * An eBPF program only cares to read the first *bytes* of a + * *msg*. If the message has a large payload, then setting up + * and calling the eBPF program repeatedly for all bytes, even + * though the verdict is already known, would create unnecessary + * overhead. + * + * When called from within an eBPF program, the helper sets a + * counter internal to the BPF infrastructure, that is used to + * apply the last verdict to the next *bytes*. If *bytes* is + * smaller than the current data being processed from a + * **sendmsg**\ () or **sendfile**\ () system call, the first + * *bytes* will be sent and the eBPF program will be re-run with + * the pointer for start of data pointing to byte number *bytes* + * **+ 1**. If *bytes* is larger than the current data being + * processed, then the eBPF verdict will be applied to multiple + * **sendmsg**\ () or **sendfile**\ () calls until *bytes* are + * consumed. + * + * Note that if a socket closes with the internal counter holding + * a non-zero value, this is not a problem because data is not + * being buffered for *bytes* and is sent as it is received. + * + * Returns + * 0 + */ +static long (*bpf_msg_apply_bytes)(struct sk_msg_md *msg, __u32 bytes) = (void *) 61; + +/* + * bpf_msg_cork_bytes + * + * For socket policies, prevent the execution of the verdict eBPF + * program for message *msg* until *bytes* (byte number) have been + * accumulated. + * + * This can be used when one needs a specific number of bytes + * before a verdict can be assigned, even if the data spans + * multiple **sendmsg**\ () or **sendfile**\ () calls. The extreme + * case would be a user calling **sendmsg**\ () repeatedly with + * 1-byte long message segments. Obviously, this is bad for + * performance, but it is still valid. If the eBPF program needs + * *bytes* bytes to validate a header, this helper can be used to + * prevent the eBPF program to be called again until *bytes* have + * been accumulated. + * + * Returns + * 0 + */ +static long (*bpf_msg_cork_bytes)(struct sk_msg_md *msg, __u32 bytes) = (void *) 62; + +/* + * bpf_msg_pull_data + * + * For socket policies, pull in non-linear data from user space + * for *msg* and set pointers *msg*\ **->data** and *msg*\ + * **->data_end** to *start* and *end* bytes offsets into *msg*, + * respectively. 
+ * + * If a program of type **BPF_PROG_TYPE_SK_MSG** is run on a + * *msg* it can only parse data that the (**data**, **data_end**) + * pointers have already consumed. For **sendmsg**\ () hooks this + * is likely the first scatterlist element. But for calls relying + * on the **sendpage** handler (e.g. **sendfile**\ ()) this will + * be the range (**0**, **0**) because the data is shared with + * user space and by default the objective is to avoid allowing + * user space to modify data while (or after) eBPF verdict is + * being decided. This helper can be used to pull in data and to + * set the start and end pointer to given values. Data will be + * copied if necessary (i.e. if data was not linear and if start + * and end pointers do not point to the same chunk). + * + * A call to this helper is susceptible to change the underlying + * packet buffer. Therefore, at load time, all checks on pointers + * previously done by the verifier are invalidated and must be + * performed again, if the helper is used in combination with + * direct packet access. + * + * All values for *flags* are reserved for future usage, and must + * be left at zero. + * + * Returns + * 0 on success, or a negative error in case of failure. + */ +static long (*bpf_msg_pull_data)(struct sk_msg_md *msg, __u32 start, __u32 end, __u64 flags) = (void *) 63; + +/* + * bpf_bind + * + * Bind the socket associated to *ctx* to the address pointed by + * *addr*, of length *addr_len*. This allows for making outgoing + * connection from the desired IP address, which can be useful for + * example when all processes inside a cgroup should use one + * single IP address on a host that has multiple IP configured. + * + * This helper works for IPv4 and IPv6, TCP and UDP sockets. The + * domain (*addr*\ **->sa_family**) must be **AF_INET** (or + * **AF_INET6**). It's advised to pass zero port (**sin_port** + * or **sin6_port**) which triggers IP_BIND_ADDRESS_NO_PORT-like + * behavior and lets the kernel efficiently pick up an unused + * port as long as 4-tuple is unique. Passing non-zero port might + * lead to degraded performance. + * + * Returns + * 0 on success, or a negative error in case of failure. + */ +static long (*bpf_bind)(struct bpf_sock_addr *ctx, struct sockaddr *addr, int addr_len) = (void *) 64; + +/* + * bpf_xdp_adjust_tail + * + * Adjust (move) *xdp_md*\ **->data_end** by *delta* bytes. It is + * possible to both shrink and grow the packet tail. + * Shrink done via *delta* being a negative integer. + * + * A call to this helper is susceptible to change the underlying + * packet buffer. Therefore, at load time, all checks on pointers + * previously done by the verifier are invalidated and must be + * performed again, if the helper is used in combination with + * direct packet access. + * + * Returns + * 0 on success, or a negative error in case of failure. + */ +static long (*bpf_xdp_adjust_tail)(struct xdp_md *xdp_md, int delta) = (void *) 65; + +/* + * bpf_skb_get_xfrm_state + * + * Retrieve the XFRM state (IP transform framework, see also + * **ip-xfrm(8)**) at *index* in XFRM "security path" for *skb*. + * + * The retrieved value is stored in the **struct bpf_xfrm_state** + * pointed by *xfrm_state* and of length *size*. + * + * All values for *flags* are reserved for future usage, and must + * be left at zero. + * + * This helper is available only if the kernel was compiled with + * **CONFIG_XFRM** configuration option. + * + * Returns + * 0 on success, or a negative error in case of failure. 
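+ *
+ * A minimal sketch reading the first state (*index* 0) of the
+ * security path and picking up its SPI:
+ *
+ * ::
+ *
+ *      struct bpf_xfrm_state xs = {};
+ *      __u32 spi = 0;
+ *
+ *      if (bpf_skb_get_xfrm_state(skb, 0, &xs, sizeof(xs), 0) == 0)
+ *              spi = xs.spi;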
+ */ +static long (*bpf_skb_get_xfrm_state)(struct __sk_buff *skb, __u32 index, struct bpf_xfrm_state *xfrm_state, __u32 size, __u64 flags) = (void *) 66; + +/* + * bpf_get_stack + * + * Return a user or a kernel stack in bpf program provided buffer. + * To achieve this, the helper needs *ctx*, which is a pointer + * to the context on which the tracing program is executed. + * To store the stacktrace, the bpf program provides *buf* with + * a nonnegative *size*. + * + * The last argument, *flags*, holds the number of stack frames to + * skip (from 0 to 255), masked with + * **BPF_F_SKIP_FIELD_MASK**. The next bits can be used to set + * the following flags: + * + * **BPF_F_USER_STACK** + * Collect a user space stack instead of a kernel stack. + * **BPF_F_USER_BUILD_ID** + * Collect buildid+offset instead of ips for user stack, + * only valid if **BPF_F_USER_STACK** is also specified. + * + * **bpf_get_stack**\ () can collect up to + * **PERF_MAX_STACK_DEPTH** both kernel and user frames, subject + * to sufficient large buffer size. Note that + * this limit can be controlled with the **sysctl** program, and + * that it should be manually increased in order to profile long + * user stacks (such as stacks for Java programs). To do so, use: + * + * :: + * + * # sysctl kernel.perf_event_max_stack= + * + * Returns + * A non-negative value equal to or less than *size* on success, + * or a negative error in case of failure. + */ +static long (*bpf_get_stack)(void *ctx, void *buf, __u32 size, __u64 flags) = (void *) 67; + +/* + * bpf_skb_load_bytes_relative + * + * This helper is similar to **bpf_skb_load_bytes**\ () in that + * it provides an easy way to load *len* bytes from *offset* + * from the packet associated to *skb*, into the buffer pointed + * by *to*. The difference to **bpf_skb_load_bytes**\ () is that + * a fifth argument *start_header* exists in order to select a + * base offset to start from. *start_header* can be one of: + * + * **BPF_HDR_START_MAC** + * Base offset to load data from is *skb*'s mac header. + * **BPF_HDR_START_NET** + * Base offset to load data from is *skb*'s network header. + * + * In general, "direct packet access" is the preferred method to + * access packet data, however, this helper is in particular useful + * in socket filters where *skb*\ **->data** does not always point + * to the start of the mac header and where "direct packet access" + * is not available. + * + * Returns + * 0 on success, or a negative error in case of failure. + */ +static long (*bpf_skb_load_bytes_relative)(const void *skb, __u32 offset, void *to, __u32 len, __u32 start_header) = (void *) 68; + +/* + * bpf_fib_lookup + * + * Do FIB lookup in kernel tables using parameters in *params*. + * If lookup is successful and result shows packet is to be + * forwarded, the neighbor tables are searched for the nexthop. + * If successful (ie., FIB lookup shows forwarding and nexthop + * is resolved), the nexthop address is returned in ipv4_dst + * or ipv6_dst based on family, smac is set to mac address of + * egress device, dmac is set to nexthop mac address, rt_metric + * is set to metric from route (IPv4/IPv6 only), and ifindex + * is set to the device index of the nexthop from the FIB lookup. + * + * *plen* argument is the size of the passed in struct. + * *flags* argument can be a combination of one or more of the + * following values: + * + * **BPF_FIB_LOOKUP_DIRECT** + * Do a direct table lookup vs full lookup using FIB + * rules. 
+ * **BPF_FIB_LOOKUP_OUTPUT** + * Perform lookup from an egress perspective (default is + * ingress). + * + * *ctx* is either **struct xdp_md** for XDP programs or + * **struct sk_buff** tc cls_act programs. + * + * Returns + * * < 0 if any input argument is invalid + * * 0 on success (packet is forwarded, nexthop neighbor exists) + * * > 0 one of **BPF_FIB_LKUP_RET_** codes explaining why the + * packet is not forwarded or needs assist from full stack + * + * If lookup fails with BPF_FIB_LKUP_RET_FRAG_NEEDED, then the MTU + * was exceeded and output params->mtu_result contains the MTU. + */ +static long (*bpf_fib_lookup)(void *ctx, struct bpf_fib_lookup *params, int plen, __u32 flags) = (void *) 69; + +/* + * bpf_sock_hash_update + * + * Add an entry to, or update a sockhash *map* referencing sockets. + * The *skops* is used as a new value for the entry associated to + * *key*. *flags* is one of: + * + * **BPF_NOEXIST** + * The entry for *key* must not exist in the map. + * **BPF_EXIST** + * The entry for *key* must already exist in the map. + * **BPF_ANY** + * No condition on the existence of the entry for *key*. + * + * If the *map* has eBPF programs (parser and verdict), those will + * be inherited by the socket being added. If the socket is + * already attached to eBPF programs, this results in an error. + * + * Returns + * 0 on success, or a negative error in case of failure. + */ +static long (*bpf_sock_hash_update)(struct bpf_sock_ops *skops, void *map, void *key, __u64 flags) = (void *) 70; + +/* + * bpf_msg_redirect_hash + * + * This helper is used in programs implementing policies at the + * socket level. If the message *msg* is allowed to pass (i.e. if + * the verdict eBPF program returns **SK_PASS**), redirect it to + * the socket referenced by *map* (of type + * **BPF_MAP_TYPE_SOCKHASH**) using hash *key*. Both ingress and + * egress interfaces can be used for redirection. The + * **BPF_F_INGRESS** value in *flags* is used to make the + * distinction (ingress path is selected if the flag is present, + * egress path otherwise). This is the only flag supported for now. + * + * Returns + * **SK_PASS** on success, or **SK_DROP** on error. + */ +static long (*bpf_msg_redirect_hash)(struct sk_msg_md *msg, void *map, void *key, __u64 flags) = (void *) 71; + +/* + * bpf_sk_redirect_hash + * + * This helper is used in programs implementing policies at the + * skb socket level. If the sk_buff *skb* is allowed to pass (i.e. + * if the verdict eBPF program returns **SK_PASS**), redirect it + * to the socket referenced by *map* (of type + * **BPF_MAP_TYPE_SOCKHASH**) using hash *key*. Both ingress and + * egress interfaces can be used for redirection. The + * **BPF_F_INGRESS** value in *flags* is used to make the + * distinction (ingress path is selected if the flag is present, + * egress otherwise). This is the only flag supported for now. + * + * Returns + * **SK_PASS** on success, or **SK_DROP** on error. + */ +static long (*bpf_sk_redirect_hash)(struct __sk_buff *skb, void *map, void *key, __u64 flags) = (void *) 72; + +/* + * bpf_lwt_push_encap + * + * Encapsulate the packet associated to *skb* within a Layer 3 + * protocol header. This header is provided in the buffer at + * address *hdr*, with *len* its size in bytes. *type* indicates + * the protocol of the header and can be one of: + * + * **BPF_LWT_ENCAP_SEG6** + * IPv6 encapsulation with Segment Routing Header + * (**struct ipv6_sr_hdr**). *hdr* only contains the SRH, + * the IPv6 header is computed by the kernel. 
+ * **BPF_LWT_ENCAP_SEG6_INLINE** + * Only works if *skb* contains an IPv6 packet. Insert a + * Segment Routing Header (**struct ipv6_sr_hdr**) inside + * the IPv6 header. + * **BPF_LWT_ENCAP_IP** + * IP encapsulation (GRE/GUE/IPIP/etc). The outer header + * must be IPv4 or IPv6, followed by zero or more + * additional headers, up to **LWT_BPF_MAX_HEADROOM** + * total bytes in all prepended headers. Please note that + * if **skb_is_gso**\ (*skb*) is true, no more than two + * headers can be prepended, and the inner header, if + * present, should be either GRE or UDP/GUE. + * + * **BPF_LWT_ENCAP_SEG6**\ \* types can be called by BPF programs + * of type **BPF_PROG_TYPE_LWT_IN**; **BPF_LWT_ENCAP_IP** type can + * be called by bpf programs of types **BPF_PROG_TYPE_LWT_IN** and + * **BPF_PROG_TYPE_LWT_XMIT**. + * + * A call to this helper is susceptible to change the underlying + * packet buffer. Therefore, at load time, all checks on pointers + * previously done by the verifier are invalidated and must be + * performed again, if the helper is used in combination with + * direct packet access. + * + * Returns + * 0 on success, or a negative error in case of failure. + */ +static long (*bpf_lwt_push_encap)(struct __sk_buff *skb, __u32 type, void *hdr, __u32 len) = (void *) 73; + +/* + * bpf_lwt_seg6_store_bytes + * + * Store *len* bytes from address *from* into the packet + * associated to *skb*, at *offset*. Only the flags, tag and TLVs + * inside the outermost IPv6 Segment Routing Header can be + * modified through this helper. + * + * A call to this helper is susceptible to change the underlying + * packet buffer. Therefore, at load time, all checks on pointers + * previously done by the verifier are invalidated and must be + * performed again, if the helper is used in combination with + * direct packet access. + * + * Returns + * 0 on success, or a negative error in case of failure. + */ +static long (*bpf_lwt_seg6_store_bytes)(struct __sk_buff *skb, __u32 offset, const void *from, __u32 len) = (void *) 74; + +/* + * bpf_lwt_seg6_adjust_srh + * + * Adjust the size allocated to TLVs in the outermost IPv6 + * Segment Routing Header contained in the packet associated to + * *skb*, at position *offset* by *delta* bytes. Only offsets + * after the segments are accepted. *delta* can be as well + * positive (growing) as negative (shrinking). + * + * A call to this helper is susceptible to change the underlying + * packet buffer. Therefore, at load time, all checks on pointers + * previously done by the verifier are invalidated and must be + * performed again, if the helper is used in combination with + * direct packet access. + * + * Returns + * 0 on success, or a negative error in case of failure. + */ +static long (*bpf_lwt_seg6_adjust_srh)(struct __sk_buff *skb, __u32 offset, __s32 delta) = (void *) 75; + +/* + * bpf_lwt_seg6_action + * + * Apply an IPv6 Segment Routing action of type *action* to the + * packet associated to *skb*. Each action takes a parameter + * contained at address *param*, and of length *param_len* bytes. + * *action* can be one of: + * + * **SEG6_LOCAL_ACTION_END_X** + * End.X action: Endpoint with Layer-3 cross-connect. + * Type of *param*: **struct in6_addr**. + * **SEG6_LOCAL_ACTION_END_T** + * End.T action: Endpoint with specific IPv6 table lookup. + * Type of *param*: **int**. + * **SEG6_LOCAL_ACTION_END_B6** + * End.B6 action: Endpoint bound to an SRv6 policy. + * Type of *param*: **struct ipv6_sr_hdr**. 
+ * **SEG6_LOCAL_ACTION_END_B6_ENCAP** + * End.B6.Encap action: Endpoint bound to an SRv6 + * encapsulation policy. + * Type of *param*: **struct ipv6_sr_hdr**. + * + * A call to this helper is susceptible to change the underlying + * packet buffer. Therefore, at load time, all checks on pointers + * previously done by the verifier are invalidated and must be + * performed again, if the helper is used in combination with + * direct packet access. + * + * Returns + * 0 on success, or a negative error in case of failure. + */ +static long (*bpf_lwt_seg6_action)(struct __sk_buff *skb, __u32 action, void *param, __u32 param_len) = (void *) 76; + +/* + * bpf_rc_repeat + * + * This helper is used in programs implementing IR decoding, to + * report a successfully decoded repeat key message. This delays + * the generation of a key up event for previously generated + * key down event. + * + * Some IR protocols like NEC have a special IR message for + * repeating last button, for when a button is held down. + * + * The *ctx* should point to the lirc sample as passed into + * the program. + * + * This helper is only available is the kernel was compiled with + * the **CONFIG_BPF_LIRC_MODE2** configuration option set to + * "**y**". + * + * Returns + * 0 + */ +static long (*bpf_rc_repeat)(void *ctx) = (void *) 77; + +/* + * bpf_rc_keydown + * + * This helper is used in programs implementing IR decoding, to + * report a successfully decoded key press with *scancode*, + * *toggle* value in the given *protocol*. The scancode will be + * translated to a keycode using the rc keymap, and reported as + * an input key down event. After a period a key up event is + * generated. This period can be extended by calling either + * **bpf_rc_keydown**\ () again with the same values, or calling + * **bpf_rc_repeat**\ (). + * + * Some protocols include a toggle bit, in case the button was + * released and pressed again between consecutive scancodes. + * + * The *ctx* should point to the lirc sample as passed into + * the program. + * + * The *protocol* is the decoded protocol number (see + * **enum rc_proto** for some predefined values). + * + * This helper is only available is the kernel was compiled with + * the **CONFIG_BPF_LIRC_MODE2** configuration option set to + * "**y**". + * + * Returns + * 0 + */ +static long (*bpf_rc_keydown)(void *ctx, __u32 protocol, __u64 scancode, __u32 toggle) = (void *) 78; + +/* + * bpf_skb_cgroup_id + * + * Return the cgroup v2 id of the socket associated with the *skb*. + * This is roughly similar to the **bpf_get_cgroup_classid**\ () + * helper for cgroup v1 by providing a tag resp. identifier that + * can be matched on or used for map lookups e.g. to implement + * policy. The cgroup v2 id of a given path in the hierarchy is + * exposed in user space through the f_handle API in order to get + * to the same 64-bit id. + * + * This helper can be used on TC egress path, but not on ingress, + * and is available only if the kernel was compiled with the + * **CONFIG_SOCK_CGROUP_DATA** configuration option. + * + * Returns + * The id is returned or 0 in case the id could not be retrieved. + */ +static __u64 (*bpf_skb_cgroup_id)(struct __sk_buff *skb) = (void *) 79; + +/* + * bpf_get_current_cgroup_id + * + * + * Returns + * A 64-bit integer containing the current cgroup id based + * on the cgroup within which the current task is running. + */ +static __u64 (*bpf_get_current_cgroup_id)(void) = (void *) 80; + +/* + * bpf_get_local_storage + * + * Get the pointer to the local storage area. 
+ * The type and the size of the local storage is defined + * by the *map* argument. + * The *flags* meaning is specific for each map type, + * and has to be 0 for cgroup local storage. + * + * Depending on the BPF program type, a local storage area + * can be shared between multiple instances of the BPF program, + * running simultaneously. + * + * A user should care about the synchronization by himself. + * For example, by using the **BPF_ATOMIC** instructions to alter + * the shared data. + * + * Returns + * A pointer to the local storage area. + */ +static void *(*bpf_get_local_storage)(void *map, __u64 flags) = (void *) 81; + +/* + * bpf_sk_select_reuseport + * + * Select a **SO_REUSEPORT** socket from a + * **BPF_MAP_TYPE_REUSEPORT_SOCKARRAY** *map*. + * It checks the selected socket is matching the incoming + * request in the socket buffer. + * + * Returns + * 0 on success, or a negative error in case of failure. + */ +static long (*bpf_sk_select_reuseport)(struct sk_reuseport_md *reuse, void *map, void *key, __u64 flags) = (void *) 82; + +/* + * bpf_skb_ancestor_cgroup_id + * + * Return id of cgroup v2 that is ancestor of cgroup associated + * with the *skb* at the *ancestor_level*. The root cgroup is at + * *ancestor_level* zero and each step down the hierarchy + * increments the level. If *ancestor_level* == level of cgroup + * associated with *skb*, then return value will be same as that + * of **bpf_skb_cgroup_id**\ (). + * + * The helper is useful to implement policies based on cgroups + * that are upper in hierarchy than immediate cgroup associated + * with *skb*. + * + * The format of returned id and helper limitations are same as in + * **bpf_skb_cgroup_id**\ (). + * + * Returns + * The id is returned or 0 in case the id could not be retrieved. + */ +static __u64 (*bpf_skb_ancestor_cgroup_id)(struct __sk_buff *skb, int ancestor_level) = (void *) 83; + +/* + * bpf_sk_lookup_tcp + * + * Look for TCP socket matching *tuple*, optionally in a child + * network namespace *netns*. The return value must be checked, + * and if non-**NULL**, released via **bpf_sk_release**\ (). + * + * The *ctx* should point to the context of the program, such as + * the skb or socket (depending on the hook in use). This is used + * to determine the base network namespace for the lookup. + * + * *tuple_size* must be one of: + * + * **sizeof**\ (*tuple*\ **->ipv4**) + * Look for an IPv4 socket. + * **sizeof**\ (*tuple*\ **->ipv6**) + * Look for an IPv6 socket. + * + * If the *netns* is a negative signed 32-bit integer, then the + * socket lookup table in the netns associated with the *ctx* + * will be used. For the TC hooks, this is the netns of the device + * in the skb. For socket hooks, this is the netns of the socket. + * If *netns* is any other signed 32-bit value greater than or + * equal to zero then it specifies the ID of the netns relative to + * the netns associated with the *ctx*. *netns* values beyond the + * range of 32-bit integers are reserved for future use. + * + * All values for *flags* are reserved for future usage, and must + * be left at zero. + * + * This helper is available only if the kernel was compiled with + * **CONFIG_NET** configuration option. + * + * Returns + * Pointer to **struct bpf_sock**, or **NULL** in case of failure. + * For sockets with reuseport option, the **struct bpf_sock** + * result is from *reuse*\ **->socks**\ [] using the hash of the + * tuple. 
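+ *
+ * A minimal sketch of the mandatory lookup/release pairing in a TC
+ * program, assuming *tuple* was already filled from the packet
+ * headers (IPv4 case):
+ *
+ * ::
+ *
+ *      struct bpf_sock *sk;
+ *
+ *      sk = bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple.ipv4),
+ *                             BPF_F_CURRENT_NETNS, 0);
+ *      if (sk)
+ *              bpf_sk_release(sk);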
+ */ +static struct bpf_sock *(*bpf_sk_lookup_tcp)(void *ctx, struct bpf_sock_tuple *tuple, __u32 tuple_size, __u64 netns, __u64 flags) = (void *) 84; + +/* + * bpf_sk_lookup_udp + * + * Look for UDP socket matching *tuple*, optionally in a child + * network namespace *netns*. The return value must be checked, + * and if non-**NULL**, released via **bpf_sk_release**\ (). + * + * The *ctx* should point to the context of the program, such as + * the skb or socket (depending on the hook in use). This is used + * to determine the base network namespace for the lookup. + * + * *tuple_size* must be one of: + * + * **sizeof**\ (*tuple*\ **->ipv4**) + * Look for an IPv4 socket. + * **sizeof**\ (*tuple*\ **->ipv6**) + * Look for an IPv6 socket. + * + * If the *netns* is a negative signed 32-bit integer, then the + * socket lookup table in the netns associated with the *ctx* + * will be used. For the TC hooks, this is the netns of the device + * in the skb. For socket hooks, this is the netns of the socket. + * If *netns* is any other signed 32-bit value greater than or + * equal to zero then it specifies the ID of the netns relative to + * the netns associated with the *ctx*. *netns* values beyond the + * range of 32-bit integers are reserved for future use. + * + * All values for *flags* are reserved for future usage, and must + * be left at zero. + * + * This helper is available only if the kernel was compiled with + * **CONFIG_NET** configuration option. + * + * Returns + * Pointer to **struct bpf_sock**, or **NULL** in case of failure. + * For sockets with reuseport option, the **struct bpf_sock** + * result is from *reuse*\ **->socks**\ [] using the hash of the + * tuple. + */ +static struct bpf_sock *(*bpf_sk_lookup_udp)(void *ctx, struct bpf_sock_tuple *tuple, __u32 tuple_size, __u64 netns, __u64 flags) = (void *) 85; + +/* + * bpf_sk_release + * + * Release the reference held by *sock*. *sock* must be a + * non-**NULL** pointer that was returned from + * **bpf_sk_lookup_xxx**\ (). + * + * Returns + * 0 on success, or a negative error in case of failure. + */ +static long (*bpf_sk_release)(void *sock) = (void *) 86; + +/* + * bpf_map_push_elem + * + * Push an element *value* in *map*. *flags* is one of: + * + * **BPF_EXIST** + * If the queue/stack is full, the oldest element is + * removed to make room for this. + * + * Returns + * 0 on success, or a negative error in case of failure. + */ +static long (*bpf_map_push_elem)(void *map, const void *value, __u64 flags) = (void *) 87; + +/* + * bpf_map_pop_elem + * + * Pop an element from *map*. + * + * Returns + * 0 on success, or a negative error in case of failure. + */ +static long (*bpf_map_pop_elem)(void *map, void *value) = (void *) 88; + +/* + * bpf_map_peek_elem + * + * Get an element from *map* without removing it. + * + * Returns + * 0 on success, or a negative error in case of failure. + */ +static long (*bpf_map_peek_elem)(void *map, void *value) = (void *) 89; + +/* + * bpf_msg_push_data + * + * For socket policies, insert *len* bytes into *msg* at offset + * *start*. + * + * If a program of type **BPF_PROG_TYPE_SK_MSG** is run on a + * *msg* it may want to insert metadata or options into the *msg*. + * This can later be read and used by any of the lower layer BPF + * hooks. + * + * This helper may fail if under memory pressure (a malloc + * fails) in these cases BPF programs will get an appropriate + * error and BPF programs will need to handle them. + * + * Returns + * 0 on success, or a negative error in case of failure. 
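+ *
+ * A minimal sketch reserving 8 bytes of metadata in front of the
+ * payload; the reserved bytes still have to be written through the
+ * *msg* data pointers afterwards, e.g. after **bpf_msg_pull_data**\ ():
+ *
+ * ::
+ *
+ *      if (bpf_msg_push_data(msg, 0, 8, 0))
+ *              return SK_DROP;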
+ */ +static long (*bpf_msg_push_data)(struct sk_msg_md *msg, __u32 start, __u32 len, __u64 flags) = (void *) 90; + +/* + * bpf_msg_pop_data + * + * Will remove *len* bytes from a *msg* starting at byte *start*. + * This may result in **ENOMEM** errors under certain situations if + * an allocation and copy are required due to a full ring buffer. + * However, the helper will try to avoid doing the allocation + * if possible. Other errors can occur if input parameters are + * invalid either due to *start* byte not being valid part of *msg* + * payload and/or *pop* value being to large. + * + * Returns + * 0 on success, or a negative error in case of failure. + */ +static long (*bpf_msg_pop_data)(struct sk_msg_md *msg, __u32 start, __u32 len, __u64 flags) = (void *) 91; + +/* + * bpf_rc_pointer_rel + * + * This helper is used in programs implementing IR decoding, to + * report a successfully decoded pointer movement. + * + * The *ctx* should point to the lirc sample as passed into + * the program. + * + * This helper is only available is the kernel was compiled with + * the **CONFIG_BPF_LIRC_MODE2** configuration option set to + * "**y**". + * + * Returns + * 0 + */ +static long (*bpf_rc_pointer_rel)(void *ctx, __s32 rel_x, __s32 rel_y) = (void *) 92; + +/* + * bpf_spin_lock + * + * Acquire a spinlock represented by the pointer *lock*, which is + * stored as part of a value of a map. Taking the lock allows to + * safely update the rest of the fields in that value. The + * spinlock can (and must) later be released with a call to + * **bpf_spin_unlock**\ (\ *lock*\ ). + * + * Spinlocks in BPF programs come with a number of restrictions + * and constraints: + * + * * **bpf_spin_lock** objects are only allowed inside maps of + * types **BPF_MAP_TYPE_HASH** and **BPF_MAP_TYPE_ARRAY** (this + * list could be extended in the future). + * * BTF description of the map is mandatory. + * * The BPF program can take ONE lock at a time, since taking two + * or more could cause dead locks. + * * Only one **struct bpf_spin_lock** is allowed per map element. + * * When the lock is taken, calls (either BPF to BPF or helpers) + * are not allowed. + * * The **BPF_LD_ABS** and **BPF_LD_IND** instructions are not + * allowed inside a spinlock-ed region. + * * The BPF program MUST call **bpf_spin_unlock**\ () to release + * the lock, on all execution paths, before it returns. + * * The BPF program can access **struct bpf_spin_lock** only via + * the **bpf_spin_lock**\ () and **bpf_spin_unlock**\ () + * helpers. Loading or storing data into the **struct + * bpf_spin_lock** *lock*\ **;** field of a map is not allowed. + * * To use the **bpf_spin_lock**\ () helper, the BTF description + * of the map value must be a struct and have **struct + * bpf_spin_lock** *anyname*\ **;** field at the top level. + * Nested lock inside another struct is not allowed. + * * The **struct bpf_spin_lock** *lock* field in a map value must + * be aligned on a multiple of 4 bytes in that value. + * * Syscall with command **BPF_MAP_LOOKUP_ELEM** does not copy + * the **bpf_spin_lock** field to user space. + * * Syscall with command **BPF_MAP_UPDATE_ELEM**, or update from + * a BPF program, do not update the **bpf_spin_lock** field. + * * **bpf_spin_lock** cannot be on the stack or inside a + * networking packet (it can only be inside of a map values). + * * **bpf_spin_lock** is available to root only. 
+ * * Tracing programs and socket filter programs cannot use + * **bpf_spin_lock**\ () due to insufficient preemption checks + * (but this may change in the future). + * * **bpf_spin_lock** is not allowed in inner maps of map-in-map. + * + * Returns + * 0 + */ +static long (*bpf_spin_lock)(struct bpf_spin_lock *lock) = (void *) 93; + +/* + * bpf_spin_unlock + * + * Release the *lock* previously locked by a call to + * **bpf_spin_lock**\ (\ *lock*\ ). + * + * Returns + * 0 + */ +static long (*bpf_spin_unlock)(struct bpf_spin_lock *lock) = (void *) 94; + +/* + * bpf_sk_fullsock + * + * This helper gets a **struct bpf_sock** pointer such + * that all the fields in this **bpf_sock** can be accessed. + * + * Returns + * A **struct bpf_sock** pointer on success, or **NULL** in + * case of failure. + */ +static struct bpf_sock *(*bpf_sk_fullsock)(struct bpf_sock *sk) = (void *) 95; + +/* + * bpf_tcp_sock + * + * This helper gets a **struct bpf_tcp_sock** pointer from a + * **struct bpf_sock** pointer. + * + * Returns + * A **struct bpf_tcp_sock** pointer on success, or **NULL** in + * case of failure. + */ +static struct bpf_tcp_sock *(*bpf_tcp_sock)(struct bpf_sock *sk) = (void *) 96; + +/* + * bpf_skb_ecn_set_ce + * + * Set ECN (Explicit Congestion Notification) field of IP header + * to **CE** (Congestion Encountered) if current value is **ECT** + * (ECN Capable Transport). Otherwise, do nothing. Works with IPv6 + * and IPv4. + * + * Returns + * 1 if the **CE** flag is set (either by the current helper call + * or because it was already present), 0 if it is not set. + */ +static long (*bpf_skb_ecn_set_ce)(struct __sk_buff *skb) = (void *) 97; + +/* + * bpf_get_listener_sock + * + * Return a **struct bpf_sock** pointer in **TCP_LISTEN** state. + * **bpf_sk_release**\ () is unnecessary and not allowed. + * + * Returns + * A **struct bpf_sock** pointer on success, or **NULL** in + * case of failure. + */ +static struct bpf_sock *(*bpf_get_listener_sock)(struct bpf_sock *sk) = (void *) 98; + +/* + * bpf_skc_lookup_tcp + * + * Look for TCP socket matching *tuple*, optionally in a child + * network namespace *netns*. The return value must be checked, + * and if non-**NULL**, released via **bpf_sk_release**\ (). + * + * This function is identical to **bpf_sk_lookup_tcp**\ (), except + * that it also returns timewait or request sockets. Use + * **bpf_sk_fullsock**\ () or **bpf_tcp_sock**\ () to access the + * full structure. + * + * This helper is available only if the kernel was compiled with + * **CONFIG_NET** configuration option. + * + * Returns + * Pointer to **struct bpf_sock**, or **NULL** in case of failure. + * For sockets with reuseport option, the **struct bpf_sock** + * result is from *reuse*\ **->socks**\ [] using the hash of the + * tuple. + */ +static struct bpf_sock *(*bpf_skc_lookup_tcp)(void *ctx, struct bpf_sock_tuple *tuple, __u32 tuple_size, __u64 netns, __u64 flags) = (void *) 99; + +/* + * bpf_tcp_check_syncookie + * + * Check whether *iph* and *th* contain a valid SYN cookie ACK for + * the listening socket in *sk*. + * + * *iph* points to the start of the IPv4 or IPv6 header, while + * *iph_len* contains **sizeof**\ (**struct iphdr**) or + * **sizeof**\ (**struct ip6hdr**). + * + * *th* points to the start of the TCP header, while *th_len* + * contains **sizeof**\ (**struct tcphdr**). + * + * Returns + * 0 if *iph* and *th* are a valid SYN cookie ACK, or a negative + * error otherwise. 
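+ *
+ * A minimal sketch for the IPv4 case, assuming *sk*, *iph* and *th*
+ * were already obtained and bounds-checked by the program:
+ *
+ * ::
+ *
+ *      int ok = bpf_tcp_check_syncookie(sk, iph, sizeof(*iph),
+ *                                       th, sizeof(*th)) == 0;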
+ */ +static long (*bpf_tcp_check_syncookie)(void *sk, void *iph, __u32 iph_len, struct tcphdr *th, __u32 th_len) = (void *) 100; + +/* + * bpf_sysctl_get_name + * + * Get name of sysctl in /proc/sys/ and copy it into provided by + * program buffer *buf* of size *buf_len*. + * + * The buffer is always NUL terminated, unless it's zero-sized. + * + * If *flags* is zero, full name (e.g. "net/ipv4/tcp_mem") is + * copied. Use **BPF_F_SYSCTL_BASE_NAME** flag to copy base name + * only (e.g. "tcp_mem"). + * + * Returns + * Number of character copied (not including the trailing NUL). + * + * **-E2BIG** if the buffer wasn't big enough (*buf* will contain + * truncated name in this case). + */ +static long (*bpf_sysctl_get_name)(struct bpf_sysctl *ctx, char *buf, unsigned long buf_len, __u64 flags) = (void *) 101; + +/* + * bpf_sysctl_get_current_value + * + * Get current value of sysctl as it is presented in /proc/sys + * (incl. newline, etc), and copy it as a string into provided + * by program buffer *buf* of size *buf_len*. + * + * The whole value is copied, no matter what file position user + * space issued e.g. sys_read at. + * + * The buffer is always NUL terminated, unless it's zero-sized. + * + * Returns + * Number of character copied (not including the trailing NUL). + * + * **-E2BIG** if the buffer wasn't big enough (*buf* will contain + * truncated name in this case). + * + * **-EINVAL** if current value was unavailable, e.g. because + * sysctl is uninitialized and read returns -EIO for it. + */ +static long (*bpf_sysctl_get_current_value)(struct bpf_sysctl *ctx, char *buf, unsigned long buf_len) = (void *) 102; + +/* + * bpf_sysctl_get_new_value + * + * Get new value being written by user space to sysctl (before + * the actual write happens) and copy it as a string into + * provided by program buffer *buf* of size *buf_len*. + * + * User space may write new value at file position > 0. + * + * The buffer is always NUL terminated, unless it's zero-sized. + * + * Returns + * Number of character copied (not including the trailing NUL). + * + * **-E2BIG** if the buffer wasn't big enough (*buf* will contain + * truncated name in this case). + * + * **-EINVAL** if sysctl is being read. + */ +static long (*bpf_sysctl_get_new_value)(struct bpf_sysctl *ctx, char *buf, unsigned long buf_len) = (void *) 103; + +/* + * bpf_sysctl_set_new_value + * + * Override new value being written by user space to sysctl with + * value provided by program in buffer *buf* of size *buf_len*. + * + * *buf* should contain a string in same form as provided by user + * space on sysctl write. + * + * User space may write new value at file position > 0. To override + * the whole sysctl value file position should be set to zero. + * + * Returns + * 0 on success. + * + * **-E2BIG** if the *buf_len* is too big. + * + * **-EINVAL** if sysctl is being read. + */ +static long (*bpf_sysctl_set_new_value)(struct bpf_sysctl *ctx, const char *buf, unsigned long buf_len) = (void *) 104; + +/* + * bpf_strtol + * + * Convert the initial part of the string from buffer *buf* of + * size *buf_len* to a long integer according to the given base + * and save the result in *res*. + * + * The string may begin with an arbitrary amount of white space + * (as determined by **isspace**\ (3)) followed by a single + * optional '**-**' sign. + * + * Five least significant bits of *flags* encode base, other bits + * are currently unused. + * + * Base must be either 8, 10, 16 or 0 to detect it automatically + * similar to user space **strtol**\ (3). 
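+ *
+ * For example, a **BPF_PROG_TYPE_CGROUP_SYSCTL** program could parse
+ * the value a process is about to write and reject it on parse
+ * failure (a minimal sketch, with *buf* an array previously filled by
+ * **bpf_sysctl_get_new_value**\ ()):
+ *
+ * ::
+ *
+ *      long val;
+ *
+ *      if (bpf_strtol(buf, sizeof(buf), 0, &val) < 0)
+ *              return 0;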
+ * + * Returns + * Number of characters consumed on success. Must be positive but + * no more than *buf_len*. + * + * **-EINVAL** if no valid digits were found or unsupported base + * was provided. + * + * **-ERANGE** if resulting value was out of range. + */ +static long (*bpf_strtol)(const char *buf, unsigned long buf_len, __u64 flags, long *res) = (void *) 105; + +/* + * bpf_strtoul + * + * Convert the initial part of the string from buffer *buf* of + * size *buf_len* to an unsigned long integer according to the + * given base and save the result in *res*. + * + * The string may begin with an arbitrary amount of white space + * (as determined by **isspace**\ (3)). + * + * Five least significant bits of *flags* encode base, other bits + * are currently unused. + * + * Base must be either 8, 10, 16 or 0 to detect it automatically + * similar to user space **strtoul**\ (3). + * + * Returns + * Number of characters consumed on success. Must be positive but + * no more than *buf_len*. + * + * **-EINVAL** if no valid digits were found or unsupported base + * was provided. + * + * **-ERANGE** if resulting value was out of range. + */ +static long (*bpf_strtoul)(const char *buf, unsigned long buf_len, __u64 flags, unsigned long *res) = (void *) 106; + +/* + * bpf_sk_storage_get + * + * Get a bpf-local-storage from a *sk*. + * + * Logically, it could be thought of getting the value from + * a *map* with *sk* as the **key**. From this + * perspective, the usage is not much different from + * **bpf_map_lookup_elem**\ (*map*, **&**\ *sk*) except this + * helper enforces the key must be a full socket and the map must + * be a **BPF_MAP_TYPE_SK_STORAGE** also. + * + * Underneath, the value is stored locally at *sk* instead of + * the *map*. The *map* is used as the bpf-local-storage + * "type". The bpf-local-storage "type" (i.e. the *map*) is + * searched against all bpf-local-storages residing at *sk*. + * + * *sk* is a kernel **struct sock** pointer for LSM program. + * *sk* is a **struct bpf_sock** pointer for other program types. + * + * An optional *flags* (**BPF_SK_STORAGE_GET_F_CREATE**) can be + * used such that a new bpf-local-storage will be + * created if one does not exist. *value* can be used + * together with **BPF_SK_STORAGE_GET_F_CREATE** to specify + * the initial value of a bpf-local-storage. If *value* is + * **NULL**, the new bpf-local-storage will be zero initialized. + * + * Returns + * A bpf-local-storage pointer is returned on success. + * + * **NULL** if not found or there was an error in adding + * a new bpf-local-storage. + */ +static void *(*bpf_sk_storage_get)(void *map, void *sk, void *value, __u64 flags) = (void *) 107; + +/* + * bpf_sk_storage_delete + * + * Delete a bpf-local-storage from a *sk*. + * + * Returns + * 0 on success. + * + * **-ENOENT** if the bpf-local-storage cannot be found. + * **-EINVAL** if sk is not a fullsock (e.g. a request_sock). + */ +static long (*bpf_sk_storage_delete)(void *map, void *sk) = (void *) 108; + +/* + * bpf_send_signal + * + * Send signal *sig* to the process of the current task. + * The signal may be delivered to any of this process's threads. + * + * Returns + * 0 on success or successfully queued. + * + * **-EBUSY** if work queue under nmi is full. + * + * **-EINVAL** if *sig* is invalid. + * + * **-EPERM** if no permission to send the *sig*. + * + * **-EAGAIN** if bpf program can try again. 
+ */ +static long (*bpf_send_signal)(__u32 sig) = (void *) 109; + +/* + * bpf_tcp_gen_syncookie + * + * Try to issue a SYN cookie for the packet with corresponding + * IP/TCP headers, *iph* and *th*, on the listening socket in *sk*. + * + * *iph* points to the start of the IPv4 or IPv6 header, while + * *iph_len* contains **sizeof**\ (**struct iphdr**) or + * **sizeof**\ (**struct ip6hdr**). + * + * *th* points to the start of the TCP header, while *th_len* + * contains the length of the TCP header. + * + * Returns + * On success, lower 32 bits hold the generated SYN cookie in + * followed by 16 bits which hold the MSS value for that cookie, + * and the top 16 bits are unused. + * + * On failure, the returned value is one of the following: + * + * **-EINVAL** SYN cookie cannot be issued due to error + * + * **-ENOENT** SYN cookie should not be issued (no SYN flood) + * + * **-EOPNOTSUPP** kernel configuration does not enable SYN cookies + * + * **-EPROTONOSUPPORT** IP packet version is not 4 or 6 + */ +static __s64 (*bpf_tcp_gen_syncookie)(void *sk, void *iph, __u32 iph_len, struct tcphdr *th, __u32 th_len) = (void *) 110; + +/* + * bpf_skb_output + * + * Write raw *data* blob into a special BPF perf event held by + * *map* of type **BPF_MAP_TYPE_PERF_EVENT_ARRAY**. This perf + * event must have the following attributes: **PERF_SAMPLE_RAW** + * as **sample_type**, **PERF_TYPE_SOFTWARE** as **type**, and + * **PERF_COUNT_SW_BPF_OUTPUT** as **config**. + * + * The *flags* are used to indicate the index in *map* for which + * the value must be put, masked with **BPF_F_INDEX_MASK**. + * Alternatively, *flags* can be set to **BPF_F_CURRENT_CPU** + * to indicate that the index of the current CPU core should be + * used. + * + * The value to write, of *size*, is passed through eBPF stack and + * pointed by *data*. + * + * *ctx* is a pointer to in-kernel struct sk_buff. + * + * This helper is similar to **bpf_perf_event_output**\ () but + * restricted to raw_tracepoint bpf programs. + * + * Returns + * 0 on success, or a negative error in case of failure. + */ +static long (*bpf_skb_output)(void *ctx, void *map, __u64 flags, void *data, __u64 size) = (void *) 111; + +/* + * bpf_probe_read_user + * + * Safely attempt to read *size* bytes from user space address + * *unsafe_ptr* and store the data in *dst*. + * + * Returns + * 0 on success, or a negative error in case of failure. + */ +static long (*bpf_probe_read_user)(void *dst, __u32 size, const void *unsafe_ptr) = (void *) 112; + +/* + * bpf_probe_read_kernel + * + * Safely attempt to read *size* bytes from kernel space address + * *unsafe_ptr* and store the data in *dst*. + * + * Returns + * 0 on success, or a negative error in case of failure. + */ +static long (*bpf_probe_read_kernel)(void *dst, __u32 size, const void *unsafe_ptr) = (void *) 113; + +/* + * bpf_probe_read_user_str + * + * Copy a NUL terminated string from an unsafe user address + * *unsafe_ptr* to *dst*. The *size* should include the + * terminating NUL byte. In case the string length is smaller than + * *size*, the target is not padded with further NUL bytes. If the + * string length is larger than *size*, just *size*-1 bytes are + * copied and the last byte is set to NUL. + * + * On success, returns the number of bytes that were written, + * including the terminal NUL. This makes this helper useful in + * tracing programs for reading strings, and more importantly to + * get its length at runtime. 
See the following snippet: + * + * :: + * + * SEC("kprobe/sys_open") + * void bpf_sys_open(struct pt_regs *ctx) + * { + * char buf[PATHLEN]; // PATHLEN is defined to 256 + * int res = bpf_probe_read_user_str(buf, sizeof(buf), + * ctx->di); + * + * // Consume buf, for example push it to + * // userspace via bpf_perf_event_output(); we + * // can use res (the string length) as event + * // size, after checking its boundaries. + * } + * + * In comparison, using **bpf_probe_read_user**\ () helper here + * instead to read the string would require to estimate the length + * at compile time, and would often result in copying more memory + * than necessary. + * + * Another useful use case is when parsing individual process + * arguments or individual environment variables navigating + * *current*\ **->mm->arg_start** and *current*\ + * **->mm->env_start**: using this helper and the return value, + * one can quickly iterate at the right offset of the memory area. + * + * Returns + * On success, the strictly positive length of the output string, + * including the trailing NUL character. On error, a negative + * value. + */ +static long (*bpf_probe_read_user_str)(void *dst, __u32 size, const void *unsafe_ptr) = (void *) 114; + +/* + * bpf_probe_read_kernel_str + * + * Copy a NUL terminated string from an unsafe kernel address *unsafe_ptr* + * to *dst*. Same semantics as with **bpf_probe_read_user_str**\ () apply. + * + * Returns + * On success, the strictly positive length of the string, including + * the trailing NUL character. On error, a negative value. + */ +static long (*bpf_probe_read_kernel_str)(void *dst, __u32 size, const void *unsafe_ptr) = (void *) 115; + +/* + * bpf_tcp_send_ack + * + * Send out a tcp-ack. *tp* is the in-kernel struct **tcp_sock**. + * *rcv_nxt* is the ack_seq to be sent out. + * + * Returns + * 0 on success, or a negative error in case of failure. + */ +static long (*bpf_tcp_send_ack)(void *tp, __u32 rcv_nxt) = (void *) 116; + +/* + * bpf_send_signal_thread + * + * Send signal *sig* to the thread corresponding to the current task. + * + * Returns + * 0 on success or successfully queued. + * + * **-EBUSY** if work queue under nmi is full. + * + * **-EINVAL** if *sig* is invalid. + * + * **-EPERM** if no permission to send the *sig*. + * + * **-EAGAIN** if bpf program can try again. + */ +static long (*bpf_send_signal_thread)(__u32 sig) = (void *) 117; + +/* + * bpf_jiffies64 + * + * Obtain the 64bit jiffies + * + * Returns + * The 64 bit jiffies + */ +static __u64 (*bpf_jiffies64)(void) = (void *) 118; + +/* + * bpf_read_branch_records + * + * For an eBPF program attached to a perf event, retrieve the + * branch records (**struct perf_branch_entry**) associated to *ctx* + * and store it in the buffer pointed by *buf* up to size + * *size* bytes. + * + * Returns + * On success, number of bytes written to *buf*. On error, a + * negative value. + * + * The *flags* can be set to **BPF_F_GET_BRANCH_RECORDS_SIZE** to + * instead return the number of bytes required to store all the + * branch entries. If this flag is set, *buf* may be NULL. + * + * **-EINVAL** if arguments invalid or **size** not a multiple + * of **sizeof**\ (**struct perf_branch_entry**\ ). + * + * **-ENOENT** if architecture does not support branch records. 
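+ *
+ * 	Editor's note: an illustrative sketch, not part of the upstream
+ * 	helper documentation, querying the required buffer size first::
+ *
+ * 		long sz = bpf_read_branch_records(ctx, NULL, 0,
+ * 						  BPF_F_GET_BRANCH_RECORDS_SIZE);
+ * 		// sz < 0 signals an error, e.g. -ENOENT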
+ */ +static long (*bpf_read_branch_records)(struct bpf_perf_event_data *ctx, void *buf, __u32 size, __u64 flags) = (void *) 119; + +/* + * bpf_get_ns_current_pid_tgid + * + * Returns 0 on success, values for *pid* and *tgid* as seen from the current + * *namespace* will be returned in *nsdata*. + * + * Returns + * 0 on success, or one of the following in case of failure: + * + * **-EINVAL** if dev and inum supplied don't match dev_t and inode number + * with nsfs of current task, or if dev conversion to dev_t lost high bits. + * + * **-ENOENT** if pidns does not exists for the current task. + */ +static long (*bpf_get_ns_current_pid_tgid)(__u64 dev, __u64 ino, struct bpf_pidns_info *nsdata, __u32 size) = (void *) 120; + +/* + * bpf_xdp_output + * + * Write raw *data* blob into a special BPF perf event held by + * *map* of type **BPF_MAP_TYPE_PERF_EVENT_ARRAY**. This perf + * event must have the following attributes: **PERF_SAMPLE_RAW** + * as **sample_type**, **PERF_TYPE_SOFTWARE** as **type**, and + * **PERF_COUNT_SW_BPF_OUTPUT** as **config**. + * + * The *flags* are used to indicate the index in *map* for which + * the value must be put, masked with **BPF_F_INDEX_MASK**. + * Alternatively, *flags* can be set to **BPF_F_CURRENT_CPU** + * to indicate that the index of the current CPU core should be + * used. + * + * The value to write, of *size*, is passed through eBPF stack and + * pointed by *data*. + * + * *ctx* is a pointer to in-kernel struct xdp_buff. + * + * This helper is similar to **bpf_perf_eventoutput**\ () but + * restricted to raw_tracepoint bpf programs. + * + * Returns + * 0 on success, or a negative error in case of failure. + */ +static long (*bpf_xdp_output)(void *ctx, void *map, __u64 flags, void *data, __u64 size) = (void *) 121; + +/* + * bpf_get_netns_cookie + * + * Retrieve the cookie (generated by the kernel) of the network + * namespace the input *ctx* is associated with. The network + * namespace cookie remains stable for its lifetime and provides + * a global identifier that can be assumed unique. If *ctx* is + * NULL, then the helper returns the cookie for the initial + * network namespace. The cookie itself is very similar to that + * of **bpf_get_socket_cookie**\ () helper, but for network + * namespaces instead of sockets. + * + * Returns + * A 8-byte long opaque number. + */ +static __u64 (*bpf_get_netns_cookie)(void *ctx) = (void *) 122; + +/* + * bpf_get_current_ancestor_cgroup_id + * + * Return id of cgroup v2 that is ancestor of the cgroup associated + * with the current task at the *ancestor_level*. The root cgroup + * is at *ancestor_level* zero and each step down the hierarchy + * increments the level. If *ancestor_level* == level of cgroup + * associated with the current task, then return value will be the + * same as that of **bpf_get_current_cgroup_id**\ (). + * + * The helper is useful to implement policies based on cgroups + * that are upper in hierarchy than immediate cgroup associated + * with the current task. + * + * The format of returned id and helper limitations are same as in + * **bpf_get_current_cgroup_id**\ (). + * + * Returns + * The id is returned or 0 in case the id could not be retrieved. + */ +static __u64 (*bpf_get_current_ancestor_cgroup_id)(int ancestor_level) = (void *) 123; + +/* + * bpf_sk_assign + * + * Helper is overloaded depending on BPF program type. This + * description applies to **BPF_PROG_TYPE_SCHED_CLS** and + * **BPF_PROG_TYPE_SCHED_ACT** programs. + * + * Assign the *sk* to the *skb*. 
When combined with appropriate + * routing configuration to receive the packet towards the socket, + * will cause *skb* to be delivered to the specified socket. + * Subsequent redirection of *skb* via **bpf_redirect**\ (), + * **bpf_clone_redirect**\ () or other methods outside of BPF may + * interfere with successful delivery to the socket. + * + * This operation is only valid from TC ingress path. + * + * The *flags* argument must be zero. + * + * Returns + * 0 on success, or a negative error in case of failure: + * + * **-EINVAL** if specified *flags* are not supported. + * + * **-ENOENT** if the socket is unavailable for assignment. + * + * **-ENETUNREACH** if the socket is unreachable (wrong netns). + * + * **-EOPNOTSUPP** if the operation is not supported, for example + * a call from outside of TC ingress. + * + * **-ESOCKTNOSUPPORT** if the socket type is not supported + * (reuseport). + */ +static long (*bpf_sk_assign)(void *ctx, void *sk, __u64 flags) = (void *) 124; + +/* + * bpf_ktime_get_boot_ns + * + * Return the time elapsed since system boot, in nanoseconds. + * Does include the time the system was suspended. + * See: **clock_gettime**\ (**CLOCK_BOOTTIME**) + * + * Returns + * Current *ktime*. + */ +static __u64 (*bpf_ktime_get_boot_ns)(void) = (void *) 125; + +/* + * bpf_seq_printf + * + * **bpf_seq_printf**\ () uses seq_file **seq_printf**\ () to print + * out the format string. + * The *m* represents the seq_file. The *fmt* and *fmt_size* are for + * the format string itself. The *data* and *data_len* are format string + * arguments. The *data* are a **u64** array and corresponding format string + * values are stored in the array. For strings and pointers where pointees + * are accessed, only the pointer values are stored in the *data* array. + * The *data_len* is the size of *data* in bytes - must be a multiple of 8. + * + * Formats **%s**, **%p{i,I}{4,6}** requires to read kernel memory. + * Reading kernel memory may fail due to either invalid address or + * valid address but requiring a major memory fault. If reading kernel memory + * fails, the string for **%s** will be an empty string, and the ip + * address for **%p{i,I}{4,6}** will be 0. Not returning error to + * bpf program is consistent with what **bpf_trace_printk**\ () does for now. + * + * Returns + * 0 on success, or a negative error in case of failure: + * + * **-EBUSY** if per-CPU memory copy buffer is busy, can try again + * by returning 1 from bpf program. + * + * **-EINVAL** if arguments are invalid, or if *fmt* is invalid/unsupported. + * + * **-E2BIG** if *fmt* contains too many format specifiers. + * + * **-EOVERFLOW** if an overflow happened: The same object will be tried again. + */ +static long (*bpf_seq_printf)(struct seq_file *m, const char *fmt, __u32 fmt_size, const void *data, __u32 data_len) = (void *) 126; + +/* + * bpf_seq_write + * + * **bpf_seq_write**\ () uses seq_file **seq_write**\ () to write the data. + * The *m* represents the seq_file. The *data* and *len* represent the + * data to write in bytes. + * + * Returns + * 0 on success, or a negative error in case of failure: + * + * **-EOVERFLOW** if an overflow happened: The same object will be tried again. + */ +static long (*bpf_seq_write)(struct seq_file *m, const void *data, __u32 len) = (void *) 127; + +/* + * bpf_sk_cgroup_id + * + * Return the cgroup v2 id of the socket *sk*. + * + * *sk* must be a non-**NULL** pointer to a socket, e.g. one + * returned from **bpf_sk_lookup_xxx**\ (), + * **bpf_sk_fullsock**\ (), etc. 
The format of returned id is + * same as in **bpf_skb_cgroup_id**\ (). + * + * This helper is available only if the kernel was compiled with + * the **CONFIG_SOCK_CGROUP_DATA** configuration option. + * + * Returns + * The id is returned or 0 in case the id could not be retrieved. + */ +static __u64 (*bpf_sk_cgroup_id)(void *sk) = (void *) 128; + +/* + * bpf_sk_ancestor_cgroup_id + * + * Return id of cgroup v2 that is ancestor of cgroup associated + * with the *sk* at the *ancestor_level*. The root cgroup is at + * *ancestor_level* zero and each step down the hierarchy + * increments the level. If *ancestor_level* == level of cgroup + * associated with *sk*, then return value will be same as that + * of **bpf_sk_cgroup_id**\ (). + * + * The helper is useful to implement policies based on cgroups + * that are upper in hierarchy than immediate cgroup associated + * with *sk*. + * + * The format of returned id and helper limitations are same as in + * **bpf_sk_cgroup_id**\ (). + * + * Returns + * The id is returned or 0 in case the id could not be retrieved. + */ +static __u64 (*bpf_sk_ancestor_cgroup_id)(void *sk, int ancestor_level) = (void *) 129; + +/* + * bpf_ringbuf_output + * + * Copy *size* bytes from *data* into a ring buffer *ringbuf*. + * If **BPF_RB_NO_WAKEUP** is specified in *flags*, no notification + * of new data availability is sent. + * If **BPF_RB_FORCE_WAKEUP** is specified in *flags*, notification + * of new data availability is sent unconditionally. + * If **0** is specified in *flags*, an adaptive notification + * of new data availability is sent. + * + * An adaptive notification is a notification sent whenever the user-space + * process has caught up and consumed all available payloads. In case the user-space + * process is still processing a previous payload, then no notification is needed + * as it will process the newly added payload automatically. + * + * Returns + * 0 on success, or a negative error in case of failure. + */ +static long (*bpf_ringbuf_output)(void *ringbuf, void *data, __u64 size, __u64 flags) = (void *) 130; + +/* + * bpf_ringbuf_reserve + * + * Reserve *size* bytes of payload in a ring buffer *ringbuf*. + * *flags* must be 0. + * + * Returns + * Valid pointer with *size* bytes of memory available; NULL, + * otherwise. + */ +static void *(*bpf_ringbuf_reserve)(void *ringbuf, __u64 size, __u64 flags) = (void *) 131; + +/* + * bpf_ringbuf_submit + * + * Submit reserved ring buffer sample, pointed to by *data*. + * If **BPF_RB_NO_WAKEUP** is specified in *flags*, no notification + * of new data availability is sent. + * If **BPF_RB_FORCE_WAKEUP** is specified in *flags*, notification + * of new data availability is sent unconditionally. + * If **0** is specified in *flags*, an adaptive notification + * of new data availability is sent. + * + * See 'bpf_ringbuf_output()' for the definition of adaptive notification. + * + * Returns + * Nothing. Always succeeds. + */ +static void (*bpf_ringbuf_submit)(void *data, __u64 flags) = (void *) 132; + +/* + * bpf_ringbuf_discard + * + * Discard reserved ring buffer sample, pointed to by *data*. + * If **BPF_RB_NO_WAKEUP** is specified in *flags*, no notification + * of new data availability is sent. + * If **BPF_RB_FORCE_WAKEUP** is specified in *flags*, notification + * of new data availability is sent unconditionally. + * If **0** is specified in *flags*, an adaptive notification + * of new data availability is sent. + * + * See 'bpf_ringbuf_output()' for the definition of adaptive notification. 
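+ *
+ * 	Editor's note: an illustrative sketch, not part of the upstream
+ * 	helper documentation, of the usual reserve/submit/discard
+ * 	pattern; the "events" ring buffer map, struct event and
+ * 	fill_event() are assumptions::
+ *
+ * 		struct event *e = bpf_ringbuf_reserve(&events, sizeof(*e), 0);
+ *
+ * 		if (!e)
+ * 			return 0;	// reservation failed
+ * 		if (fill_event(e))
+ * 			bpf_ringbuf_submit(e, 0);
+ * 		else
+ * 			bpf_ringbuf_discard(e, 0);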
+ * + * Returns + * Nothing. Always succeeds. + */ +static void (*bpf_ringbuf_discard)(void *data, __u64 flags) = (void *) 133; + +/* + * bpf_ringbuf_query + * + * Query various characteristics of provided ring buffer. What + * exactly is queries is determined by *flags*: + * + * * **BPF_RB_AVAIL_DATA**: Amount of data not yet consumed. + * * **BPF_RB_RING_SIZE**: The size of ring buffer. + * * **BPF_RB_CONS_POS**: Consumer position (can wrap around). + * * **BPF_RB_PROD_POS**: Producer(s) position (can wrap around). + * + * Data returned is just a momentary snapshot of actual values + * and could be inaccurate, so this facility should be used to + * power heuristics and for reporting, not to make 100% correct + * calculation. + * + * Returns + * Requested value, or 0, if *flags* are not recognized. + */ +static __u64 (*bpf_ringbuf_query)(void *ringbuf, __u64 flags) = (void *) 134; + +/* + * bpf_csum_level + * + * Change the skbs checksum level by one layer up or down, or + * reset it entirely to none in order to have the stack perform + * checksum validation. The level is applicable to the following + * protocols: TCP, UDP, GRE, SCTP, FCOE. For example, a decap of + * | ETH | IP | UDP | GUE | IP | TCP | into | ETH | IP | TCP | + * through **bpf_skb_adjust_room**\ () helper with passing in + * **BPF_F_ADJ_ROOM_NO_CSUM_RESET** flag would require one call + * to **bpf_csum_level**\ () with **BPF_CSUM_LEVEL_DEC** since + * the UDP header is removed. Similarly, an encap of the latter + * into the former could be accompanied by a helper call to + * **bpf_csum_level**\ () with **BPF_CSUM_LEVEL_INC** if the + * skb is still intended to be processed in higher layers of the + * stack instead of just egressing at tc. + * + * There are three supported level settings at this time: + * + * * **BPF_CSUM_LEVEL_INC**: Increases skb->csum_level for skbs + * with CHECKSUM_UNNECESSARY. + * * **BPF_CSUM_LEVEL_DEC**: Decreases skb->csum_level for skbs + * with CHECKSUM_UNNECESSARY. + * * **BPF_CSUM_LEVEL_RESET**: Resets skb->csum_level to 0 and + * sets CHECKSUM_NONE to force checksum validation by the stack. + * * **BPF_CSUM_LEVEL_QUERY**: No-op, returns the current + * skb->csum_level. + * + * Returns + * 0 on success, or a negative error in case of failure. In the + * case of **BPF_CSUM_LEVEL_QUERY**, the current skb->csum_level + * is returned or the error code -EACCES in case the skb is not + * subject to CHECKSUM_UNNECESSARY. + */ +static long (*bpf_csum_level)(struct __sk_buff *skb, __u64 level) = (void *) 135; + +/* + * bpf_skc_to_tcp6_sock + * + * Dynamically cast a *sk* pointer to a *tcp6_sock* pointer. + * + * Returns + * *sk* if casting is valid, or **NULL** otherwise. + */ +static struct tcp6_sock *(*bpf_skc_to_tcp6_sock)(void *sk) = (void *) 136; + +/* + * bpf_skc_to_tcp_sock + * + * Dynamically cast a *sk* pointer to a *tcp_sock* pointer. + * + * Returns + * *sk* if casting is valid, or **NULL** otherwise. + */ +static struct tcp_sock *(*bpf_skc_to_tcp_sock)(void *sk) = (void *) 137; + +/* + * bpf_skc_to_tcp_timewait_sock + * + * Dynamically cast a *sk* pointer to a *tcp_timewait_sock* pointer. + * + * Returns + * *sk* if casting is valid, or **NULL** otherwise. + */ +static struct tcp_timewait_sock *(*bpf_skc_to_tcp_timewait_sock)(void *sk) = (void *) 138; + +/* + * bpf_skc_to_tcp_request_sock + * + * Dynamically cast a *sk* pointer to a *tcp_request_sock* pointer. + * + * Returns + * *sk* if casting is valid, or **NULL** otherwise. 
+ */ +static struct tcp_request_sock *(*bpf_skc_to_tcp_request_sock)(void *sk) = (void *) 139; + +/* + * bpf_skc_to_udp6_sock + * + * Dynamically cast a *sk* pointer to a *udp6_sock* pointer. + * + * Returns + * *sk* if casting is valid, or **NULL** otherwise. + */ +static struct udp6_sock *(*bpf_skc_to_udp6_sock)(void *sk) = (void *) 140; + +/* + * bpf_get_task_stack + * + * Return a user or a kernel stack in bpf program provided buffer. + * To achieve this, the helper needs *task*, which is a valid + * pointer to **struct task_struct**. To store the stacktrace, the + * bpf program provides *buf* with a nonnegative *size*. + * + * The last argument, *flags*, holds the number of stack frames to + * skip (from 0 to 255), masked with + * **BPF_F_SKIP_FIELD_MASK**. The next bits can be used to set + * the following flags: + * + * **BPF_F_USER_STACK** + * Collect a user space stack instead of a kernel stack. + * **BPF_F_USER_BUILD_ID** + * Collect buildid+offset instead of ips for user stack, + * only valid if **BPF_F_USER_STACK** is also specified. + * + * **bpf_get_task_stack**\ () can collect up to + * **PERF_MAX_STACK_DEPTH** both kernel and user frames, subject + * to sufficient large buffer size. Note that + * this limit can be controlled with the **sysctl** program, and + * that it should be manually increased in order to profile long + * user stacks (such as stacks for Java programs). To do so, use: + * + * :: + * + * # sysctl kernel.perf_event_max_stack= + * + * Returns + * A non-negative value equal to or less than *size* on success, + * or a negative error in case of failure. + */ +static long (*bpf_get_task_stack)(struct task_struct *task, void *buf, __u32 size, __u64 flags) = (void *) 141; + +/* + * bpf_load_hdr_opt + * + * Load header option. Support reading a particular TCP header + * option for bpf program (**BPF_PROG_TYPE_SOCK_OPS**). + * + * If *flags* is 0, it will search the option from the + * *skops*\ **->skb_data**. The comment in **struct bpf_sock_ops** + * has details on what skb_data contains under different + * *skops*\ **->op**. + * + * The first byte of the *searchby_res* specifies the + * kind that it wants to search. + * + * If the searching kind is an experimental kind + * (i.e. 253 or 254 according to RFC6994). It also + * needs to specify the "magic" which is either + * 2 bytes or 4 bytes. It then also needs to + * specify the size of the magic by using + * the 2nd byte which is "kind-length" of a TCP + * header option and the "kind-length" also + * includes the first 2 bytes "kind" and "kind-length" + * itself as a normal TCP header option also does. + * + * For example, to search experimental kind 254 with + * 2 byte magic 0xeB9F, the searchby_res should be + * [ 254, 4, 0xeB, 0x9F, 0, 0, .... 0 ]. + * + * To search for the standard window scale option (3), + * the *searchby_res* should be [ 3, 0, 0, .... 0 ]. + * Note, kind-length must be 0 for regular option. + * + * Searching for No-Op (0) and End-of-Option-List (1) are + * not supported. + * + * *len* must be at least 2 bytes which is the minimal size + * of a header option. + * + * Supported flags: + * + * * **BPF_LOAD_HDR_OPT_TCP_SYN** to search from the + * saved_syn packet or the just-received syn packet. + * + * + * Returns + * > 0 when found, the header option is copied to *searchby_res*. + * The return value is the total length copied. On failure, a + * negative error code is returned: + * + * **-EINVAL** if a parameter is invalid. + * + * **-ENOMSG** if the option is not found. 
+ * + * **-ENOENT** if no syn packet is available when + * **BPF_LOAD_HDR_OPT_TCP_SYN** is used. + * + * **-ENOSPC** if there is not enough space. Only *len* number of + * bytes are copied. + * + * **-EFAULT** on failure to parse the header options in the + * packet. + * + * **-EPERM** if the helper cannot be used under the current + * *skops*\ **->op**. + */ +static long (*bpf_load_hdr_opt)(struct bpf_sock_ops *skops, void *searchby_res, __u32 len, __u64 flags) = (void *) 142; + +/* + * bpf_store_hdr_opt + * + * Store header option. The data will be copied + * from buffer *from* with length *len* to the TCP header. + * + * The buffer *from* should have the whole option that + * includes the kind, kind-length, and the actual + * option data. The *len* must be at least kind-length + * long. The kind-length does not have to be 4 byte + * aligned. The kernel will take care of the padding + * and setting the 4 bytes aligned value to th->doff. + * + * This helper will check for duplicated option + * by searching the same option in the outgoing skb. + * + * This helper can only be called during + * **BPF_SOCK_OPS_WRITE_HDR_OPT_CB**. + * + * + * Returns + * 0 on success, or negative error in case of failure: + * + * **-EINVAL** If param is invalid. + * + * **-ENOSPC** if there is not enough space in the header. + * Nothing has been written + * + * **-EEXIST** if the option already exists. + * + * **-EFAULT** on failrue to parse the existing header options. + * + * **-EPERM** if the helper cannot be used under the current + * *skops*\ **->op**. + */ +static long (*bpf_store_hdr_opt)(struct bpf_sock_ops *skops, const void *from, __u32 len, __u64 flags) = (void *) 143; + +/* + * bpf_reserve_hdr_opt + * + * Reserve *len* bytes for the bpf header option. The + * space will be used by **bpf_store_hdr_opt**\ () later in + * **BPF_SOCK_OPS_WRITE_HDR_OPT_CB**. + * + * If **bpf_reserve_hdr_opt**\ () is called multiple times, + * the total number of bytes will be reserved. + * + * This helper can only be called during + * **BPF_SOCK_OPS_HDR_OPT_LEN_CB**. + * + * + * Returns + * 0 on success, or negative error in case of failure: + * + * **-EINVAL** if a parameter is invalid. + * + * **-ENOSPC** if there is not enough space in the header. + * + * **-EPERM** if the helper cannot be used under the current + * *skops*\ **->op**. + */ +static long (*bpf_reserve_hdr_opt)(struct bpf_sock_ops *skops, __u32 len, __u64 flags) = (void *) 144; + +/* + * bpf_inode_storage_get + * + * Get a bpf_local_storage from an *inode*. + * + * Logically, it could be thought of as getting the value from + * a *map* with *inode* as the **key**. From this + * perspective, the usage is not much different from + * **bpf_map_lookup_elem**\ (*map*, **&**\ *inode*) except this + * helper enforces the key must be an inode and the map must also + * be a **BPF_MAP_TYPE_INODE_STORAGE**. + * + * Underneath, the value is stored locally at *inode* instead of + * the *map*. The *map* is used as the bpf-local-storage + * "type". The bpf-local-storage "type" (i.e. the *map*) is + * searched against all bpf_local_storage residing at *inode*. + * + * An optional *flags* (**BPF_LOCAL_STORAGE_GET_F_CREATE**) can be + * used such that a new bpf_local_storage will be + * created if one does not exist. *value* can be used + * together with **BPF_LOCAL_STORAGE_GET_F_CREATE** to specify + * the initial value of a bpf_local_storage. If *value* is + * **NULL**, the new bpf_local_storage will be zero initialized. 
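+ *
+ * 	Editor's note: an illustrative sketch, not part of the upstream
+ * 	helper documentation; the "inode_map" map and struct inode_data
+ * 	are assumptions, with *inode* as provided by e.g. an LSM hook::
+ *
+ * 		struct inode_data *d;
+ *
+ * 		d = bpf_inode_storage_get(&inode_map, inode, NULL,
+ * 					  BPF_LOCAL_STORAGE_GET_F_CREATE);
+ * 		if (!d)
+ * 			return 0;	// no storage could be created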
+ * + * Returns + * A bpf_local_storage pointer is returned on success. + * + * **NULL** if not found or there was an error in adding + * a new bpf_local_storage. + */ +static void *(*bpf_inode_storage_get)(void *map, void *inode, void *value, __u64 flags) = (void *) 145; + +/* + * bpf_inode_storage_delete + * + * Delete a bpf_local_storage from an *inode*. + * + * Returns + * 0 on success. + * + * **-ENOENT** if the bpf_local_storage cannot be found. + */ +static int (*bpf_inode_storage_delete)(void *map, void *inode) = (void *) 146; + +/* + * bpf_d_path + * + * Return full path for given **struct path** object, which + * needs to be the kernel BTF *path* object. The path is + * returned in the provided buffer *buf* of size *sz* and + * is zero terminated. + * + * + * Returns + * On success, the strictly positive length of the string, + * including the trailing NUL character. On error, a negative + * value. + */ +static long (*bpf_d_path)(struct path *path, char *buf, __u32 sz) = (void *) 147; + +/* + * bpf_copy_from_user + * + * Read *size* bytes from user space address *user_ptr* and store + * the data in *dst*. This is a wrapper of **copy_from_user**\ (). + * + * Returns + * 0 on success, or a negative error in case of failure. + */ +static long (*bpf_copy_from_user)(void *dst, __u32 size, const void *user_ptr) = (void *) 148; + +/* + * bpf_snprintf_btf + * + * Use BTF to store a string representation of *ptr*->ptr in *str*, + * using *ptr*->type_id. This value should specify the type + * that *ptr*->ptr points to. LLVM __builtin_btf_type_id(type, 1) + * can be used to look up vmlinux BTF type ids. Traversing the + * data structure using BTF, the type information and values are + * stored in the first *str_size* - 1 bytes of *str*. Safe copy of + * the pointer data is carried out to avoid kernel crashes during + * operation. Smaller types can use string space on the stack; + * larger programs can use map data to store the string + * representation. + * + * The string can be subsequently shared with userspace via + * bpf_perf_event_output() or ring buffer interfaces. + * bpf_trace_printk() is to be avoided as it places too small + * a limit on string size to be useful. + * + * *flags* is a combination of + * + * **BTF_F_COMPACT** + * no formatting around type information + * **BTF_F_NONAME** + * no struct/union member names/types + * **BTF_F_PTR_RAW** + * show raw (unobfuscated) pointer values; + * equivalent to printk specifier %px. + * **BTF_F_ZERO** + * show zero-valued struct/union members; they + * are not displayed by default + * + * + * Returns + * The number of bytes that were written (or would have been + * written if output had to be truncated due to string size), + * or a negative error in cases of failure. + */ +static long (*bpf_snprintf_btf)(char *str, __u32 str_size, struct btf_ptr *ptr, __u32 btf_ptr_size, __u64 flags) = (void *) 149; + +/* + * bpf_seq_printf_btf + * + * Use BTF to write to seq_write a string representation of + * *ptr*->ptr, using *ptr*->type_id as per bpf_snprintf_btf(). + * *flags* are identical to those used for bpf_snprintf_btf. + * + * Returns + * 0 on success or a negative error in case of failure. + */ +static long (*bpf_seq_printf_btf)(struct seq_file *m, struct btf_ptr *ptr, __u32 ptr_size, __u64 flags) = (void *) 150; + +/* + * bpf_skb_cgroup_classid + * + * See **bpf_get_cgroup_classid**\ () for the main description. 
+ * This helper differs from **bpf_get_cgroup_classid**\ () in that + * the cgroup v1 net_cls class is retrieved only from the *skb*'s + * associated socket instead of the current process. + * + * Returns + * The id is returned or 0 in case the id could not be retrieved. + */ +static __u64 (*bpf_skb_cgroup_classid)(struct __sk_buff *skb) = (void *) 151; + +/* + * bpf_redirect_neigh + * + * Redirect the packet to another net device of index *ifindex* + * and fill in L2 addresses from neighboring subsystem. This helper + * is somewhat similar to **bpf_redirect**\ (), except that it + * populates L2 addresses as well, meaning, internally, the helper + * relies on the neighbor lookup for the L2 address of the nexthop. + * + * The helper will perform a FIB lookup based on the skb's + * networking header to get the address of the next hop, unless + * this is supplied by the caller in the *params* argument. The + * *plen* argument indicates the len of *params* and should be set + * to 0 if *params* is NULL. + * + * The *flags* argument is reserved and must be 0. The helper is + * currently only supported for tc BPF program types, and enabled + * for IPv4 and IPv6 protocols. + * + * Returns + * The helper returns **TC_ACT_REDIRECT** on success or + * **TC_ACT_SHOT** on error. + */ +static long (*bpf_redirect_neigh)(__u32 ifindex, struct bpf_redir_neigh *params, int plen, __u64 flags) = (void *) 152; + +/* + * bpf_per_cpu_ptr + * + * Take a pointer to a percpu ksym, *percpu_ptr*, and return a + * pointer to the percpu kernel variable on *cpu*. A ksym is an + * extern variable decorated with '__ksym'. For ksym, there is a + * global var (either static or global) defined of the same name + * in the kernel. The ksym is percpu if the global var is percpu. + * The returned pointer points to the global percpu var on *cpu*. + * + * bpf_per_cpu_ptr() has the same semantic as per_cpu_ptr() in the + * kernel, except that bpf_per_cpu_ptr() may return NULL. This + * happens if *cpu* is larger than nr_cpu_ids. The caller of + * bpf_per_cpu_ptr() must check the returned value. + * + * Returns + * A pointer pointing to the kernel percpu variable on *cpu*, or + * NULL, if *cpu* is invalid. + */ +static void *(*bpf_per_cpu_ptr)(const void *percpu_ptr, __u32 cpu) = (void *) 153; + +/* + * bpf_this_cpu_ptr + * + * Take a pointer to a percpu ksym, *percpu_ptr*, and return a + * pointer to the percpu kernel variable on this cpu. See the + * description of 'ksym' in **bpf_per_cpu_ptr**\ (). + * + * bpf_this_cpu_ptr() has the same semantic as this_cpu_ptr() in + * the kernel. Different from **bpf_per_cpu_ptr**\ (), it would + * never return NULL. + * + * Returns + * A pointer pointing to the kernel percpu variable on this cpu. + */ +static void *(*bpf_this_cpu_ptr)(const void *percpu_ptr) = (void *) 154; + +/* + * bpf_redirect_peer + * + * Redirect the packet to another net device of index *ifindex*. + * This helper is somewhat similar to **bpf_redirect**\ (), except + * that the redirection happens to the *ifindex*' peer device and + * the netns switch takes place from ingress to ingress without + * going through the CPU's backlog queue. + * + * The *flags* argument is reserved and must be 0. The helper is + * currently only supported for tc BPF program types at the ingress + * hook and for veth device types. The peer device must reside in a + * different network namespace. + * + * Returns + * The helper returns **TC_ACT_REDIRECT** on success or + * **TC_ACT_SHOT** on error. 
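+ *
+ * 	Editor's note: an illustrative sketch, not part of the upstream
+ * 	helper documentation; VETH_IFINDEX is an assumed ifindex of a
+ * 	host-side veth whose peer lives in another netns::
+ *
+ * 		SEC("tc")
+ * 		int ingress_redirect(struct __sk_buff *skb)
+ * 		{
+ * 			// delivered to the peer device of VETH_IFINDEX
+ * 			return bpf_redirect_peer(VETH_IFINDEX, 0);
+ * 		}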
+ */ +static long (*bpf_redirect_peer)(__u32 ifindex, __u64 flags) = (void *) 155; + +/* + * bpf_task_storage_get + * + * Get a bpf_local_storage from the *task*. + * + * Logically, it could be thought of as getting the value from + * a *map* with *task* as the **key**. From this + * perspective, the usage is not much different from + * **bpf_map_lookup_elem**\ (*map*, **&**\ *task*) except this + * helper enforces the key must be a task_struct and the map must also + * be a **BPF_MAP_TYPE_TASK_STORAGE**. + * + * Underneath, the value is stored locally at *task* instead of + * the *map*. The *map* is used as the bpf-local-storage + * "type". The bpf-local-storage "type" (i.e. the *map*) is + * searched against all bpf_local_storage residing at *task*. + * + * An optional *flags* (**BPF_LOCAL_STORAGE_GET_F_CREATE**) can be + * used such that a new bpf_local_storage will be + * created if one does not exist. *value* can be used + * together with **BPF_LOCAL_STORAGE_GET_F_CREATE** to specify + * the initial value of a bpf_local_storage. If *value* is + * **NULL**, the new bpf_local_storage will be zero initialized. + * + * Returns + * A bpf_local_storage pointer is returned on success. + * + * **NULL** if not found or there was an error in adding + * a new bpf_local_storage. + */ +static void *(*bpf_task_storage_get)(void *map, struct task_struct *task, void *value, __u64 flags) = (void *) 156; + +/* + * bpf_task_storage_delete + * + * Delete a bpf_local_storage from a *task*. + * + * Returns + * 0 on success. + * + * **-ENOENT** if the bpf_local_storage cannot be found. + */ +static long (*bpf_task_storage_delete)(void *map, struct task_struct *task) = (void *) 157; + +/* + * bpf_get_current_task_btf + * + * Return a BTF pointer to the "current" task. + * This pointer can also be used in helpers that accept an + * *ARG_PTR_TO_BTF_ID* of type *task_struct*. + * + * Returns + * Pointer to the current task. + */ +static struct task_struct *(*bpf_get_current_task_btf)(void) = (void *) 158; + +/* + * bpf_bprm_opts_set + * + * Set or clear certain options on *bprm*: + * + * **BPF_F_BPRM_SECUREEXEC** Set the secureexec bit + * which sets the **AT_SECURE** auxv for glibc. The bit + * is cleared if the flag is not specified. + * + * Returns + * **-EINVAL** if invalid *flags* are passed, zero otherwise. + */ +static long (*bpf_bprm_opts_set)(struct linux_binprm *bprm, __u64 flags) = (void *) 159; + +/* + * bpf_ktime_get_coarse_ns + * + * Return a coarse-grained version of the time elapsed since + * system boot, in nanoseconds. Does not include time the system + * was suspended. + * + * See: **clock_gettime**\ (**CLOCK_MONOTONIC_COARSE**) + * + * Returns + * Current *ktime*. + */ +static __u64 (*bpf_ktime_get_coarse_ns)(void) = (void *) 160; + +/* + * bpf_ima_inode_hash + * + * Returns the stored IMA hash of the *inode* (if it's available). + * If the hash is larger than *size*, then only *size* + * bytes will be copied to *dst*. + * + * Returns + * The **hash_algo** is returned on success, + * **-EOPNOTSUPP** if IMA is disabled or **-EINVAL** if + * invalid arguments are passed. + */ +static long (*bpf_ima_inode_hash)(struct inode *inode, void *dst, __u32 size) = (void *) 161; + +/* + * bpf_sock_from_file + * + * If the given file represents a socket, returns the associated + * socket. + * + * Returns + * A pointer to a struct socket on success or NULL if the file is + * not a socket.
+ */ +static struct socket *(*bpf_sock_from_file)(struct file *file) = (void *) 162; + +/* + * bpf_check_mtu + * + * Check whether the packet size exceeds the MTU of the net device (based + * on *ifindex*). This helper will likely be used in combination + * with helpers that adjust/change the packet size. + * + * The argument *len_diff* can be used for querying with a planned + * size change. This allows checking the MTU prior to changing the packet + * ctx. Providing a *len_diff* adjustment that is larger than the + * actual packet size (resulting in a negative packet size) will in + * principle not exceed the MTU, which is why it is not considered a + * failure. Other BPF helpers are needed for performing the + * planned size change, which is why the responsibility for catching a negative + * packet size belongs in those helpers. + * + * Specifying *ifindex* zero means the MTU check is performed + * against the current net device. This is practical if this isn't + * used prior to redirect. + * + * On input *mtu_len* must be a valid pointer, else the verifier will + * reject the BPF program. If the value *mtu_len* is initialized to + * zero then the ctx packet size is used. When the value *mtu_len* is + * provided as input, it specifies the L3 length that the MTU check + * is done against. Remember XDP and TC length operate at L2, but + * this value is L3 as it correlates to the MTU and IP-header tot_len + * values, which are L3 (similar behavior as bpf_fib_lookup). + * + * The Linux kernel route table can configure MTUs on a more + * specific per route level, which is not provided by this helper. + * For route level MTU checks use the **bpf_fib_lookup**\ () + * helper. + * + * *ctx* is either **struct xdp_md** for XDP programs or + * **struct sk_buff** for tc cls_act programs. + * + * The *flags* argument can be a combination of one or more of the + * following values: + * + * **BPF_MTU_CHK_SEGS** + * This flag only works for *ctx* **struct sk_buff**. + * If the packet context contains extra packet segment buffers + * (often known as a GSO skb), then the MTU check is harder to + * perform at this point, because in the transmit path it is + * possible for the skb packet to get re-segmented + * (depending on net device features). This could still be + * an MTU violation, so this flag enables performing the MTU + * check against segments, with a different violation + * return code to tell it apart. The check cannot use len_diff. + * + * On return the *mtu_len* pointer contains the MTU value of the net + * device. Remember the net device's configured MTU is the L3 size, + * which is what is returned here, while XDP and TC length operate at L2. + * The helper takes this into account for you, but remember it when using + * the MTU value in your BPF code. + * + * + * Returns + * * 0 on success, and populates the MTU value in the *mtu_len* pointer. + * + * * < 0 if any input argument is invalid (*mtu_len* not updated) + * + * MTU violations return positive values, but also populate the MTU + * value in the *mtu_len* pointer, as this can be needed for + * implementing PMTU handling: + * + * * **BPF_MTU_CHK_RET_FRAG_NEEDED** + * * **BPF_MTU_CHK_RET_SEGS_TOOBIG** + */ +static long (*bpf_check_mtu)(void *ctx, __u32 ifindex, __u32 *mtu_len, __s32 len_diff, __u64 flags) = (void *) 163; + +/* + * bpf_for_each_map_elem + * + * For each element in **map**, call **callback_fn** function with + * **map**, **callback_ctx** and other map-specific parameters. + * The **callback_fn** should be a static function and + * the **callback_ctx** should be a pointer to the stack.
+ * The **flags** is used to control certain aspects of the helper. + * Currently, the **flags** must be 0. + * + * The following are a list of supported map types and their + * respective expected callback signatures: + * + * BPF_MAP_TYPE_HASH, BPF_MAP_TYPE_PERCPU_HASH, + * BPF_MAP_TYPE_LRU_HASH, BPF_MAP_TYPE_LRU_PERCPU_HASH, + * BPF_MAP_TYPE_ARRAY, BPF_MAP_TYPE_PERCPU_ARRAY + * + * long (\*callback_fn)(struct bpf_map \*map, const void \*key, void \*value, void \*ctx); + * + * For per_cpu maps, the map_value is the value on the cpu where the + * bpf_prog is running. + * + * If **callback_fn** return 0, the helper will continue to the next + * element. If return value is 1, the helper will skip the rest of + * elements and return. Other return values are not used now. + * + * + * Returns + * The number of traversed map elements for success, **-EINVAL** for + * invalid **flags**. + */ +static long (*bpf_for_each_map_elem)(void *map, void *callback_fn, void *callback_ctx, __u64 flags) = (void *) 164; + +/* + * bpf_snprintf + * + * Outputs a string into the **str** buffer of size **str_size** + * based on a format string stored in a read-only map pointed by + * **fmt**. + * + * Each format specifier in **fmt** corresponds to one u64 element + * in the **data** array. For strings and pointers where pointees + * are accessed, only the pointer values are stored in the *data* + * array. The *data_len* is the size of *data* in bytes - must be + * a multiple of 8. + * + * Formats **%s** and **%p{i,I}{4,6}** require to read kernel + * memory. Reading kernel memory may fail due to either invalid + * address or valid address but requiring a major memory fault. If + * reading kernel memory fails, the string for **%s** will be an + * empty string, and the ip address for **%p{i,I}{4,6}** will be 0. + * Not returning error to bpf program is consistent with what + * **bpf_trace_printk**\ () does for now. + * + * + * Returns + * The strictly positive length of the formatted string, including + * the trailing zero character. If the return value is greater than + * **str_size**, **str** contains a truncated string, guaranteed to + * be zero-terminated except when **str_size** is 0. + * + * Or **-EBUSY** if the per-CPU memory copy buffer is busy. + */ +static long (*bpf_snprintf)(char *str, __u32 str_size, const char *fmt, __u64 *data, __u32 data_len) = (void *) 165; + +/* + * bpf_sys_bpf + * + * Execute bpf syscall with given arguments. + * + * Returns + * A syscall result. + */ +static long (*bpf_sys_bpf)(__u32 cmd, void *attr, __u32 attr_size) = (void *) 166; + +/* + * bpf_btf_find_by_name_kind + * + * Find BTF type with given name and kind in vmlinux BTF or in module's BTFs. + * + * Returns + * Returns btf_id and btf_obj_fd in lower and upper 32 bits. + */ +static long (*bpf_btf_find_by_name_kind)(char *name, int name_sz, __u32 kind, int flags) = (void *) 167; + +/* + * bpf_sys_close + * + * Execute close syscall for given FD. + * + * Returns + * A syscall result. + */ +static long (*bpf_sys_close)(__u32 fd) = (void *) 168; + +/* + * bpf_timer_init + * + * Initialize the timer. + * First 4 bits of *flags* specify clockid. + * Only CLOCK_MONOTONIC, CLOCK_REALTIME, CLOCK_BOOTTIME are allowed. + * All other bits of *flags* are reserved. + * The verifier will reject the program if *timer* is not from + * the same *map*. + * + * Returns + * 0 on success. + * **-EBUSY** if *timer* is already initialized. + * **-EINVAL** if invalid *flags* are passed. 
+ * **-EPERM** if *timer* is in a map that doesn't have any user references. + * The user space should either hold a file descriptor to a map with timers + * or pin such map in bpffs. When map is unpinned or file descriptor is + * closed all timers in the map will be cancelled and freed. + */ +static long (*bpf_timer_init)(struct bpf_timer *timer, void *map, __u64 flags) = (void *) 169; + +/* + * bpf_timer_set_callback + * + * Configure the timer to call *callback_fn* static function. + * + * Returns + * 0 on success. + * **-EINVAL** if *timer* was not initialized with bpf_timer_init() earlier. + * **-EPERM** if *timer* is in a map that doesn't have any user references. + * The user space should either hold a file descriptor to a map with timers + * or pin such map in bpffs. When map is unpinned or file descriptor is + * closed all timers in the map will be cancelled and freed. + */ +static long (*bpf_timer_set_callback)(struct bpf_timer *timer, void *callback_fn) = (void *) 170; + +/* + * bpf_timer_start + * + * Set timer expiration N nanoseconds from the current time. The + * configured callback will be invoked in soft irq context on some cpu + * and will not repeat unless another bpf_timer_start() is made. + * In such case the next invocation can migrate to a different cpu. + * Since struct bpf_timer is a field inside map element the map + * owns the timer. The bpf_timer_set_callback() will increment refcnt + * of BPF program to make sure that callback_fn code stays valid. + * When user space reference to a map reaches zero all timers + * in a map are cancelled and corresponding program's refcnts are + * decremented. This is done to make sure that Ctrl-C of a user + * process doesn't leave any timers running. If map is pinned in + * bpffs the callback_fn can re-arm itself indefinitely. + * bpf_map_update/delete_elem() helpers and user space sys_bpf commands + * cancel and free the timer in the given map element. + * The map can contain timers that invoke callback_fn-s from different + * programs. The same callback_fn can serve different timers from + * different maps if key/value layout matches across maps. + * Every bpf_timer_set_callback() can have different callback_fn. + * + * + * Returns + * 0 on success. + * **-EINVAL** if *timer* was not initialized with bpf_timer_init() earlier + * or invalid *flags* are passed. + */ +static long (*bpf_timer_start)(struct bpf_timer *timer, __u64 nsecs, __u64 flags) = (void *) 171; + +/* + * bpf_timer_cancel + * + * Cancel the timer and wait for callback_fn to finish if it was running. + * + * Returns + * 0 if the timer was not active. + * 1 if the timer was active. + * **-EINVAL** if *timer* was not initialized with bpf_timer_init() earlier. + * **-EDEADLK** if callback_fn tried to call bpf_timer_cancel() on its + * own timer which would have led to a deadlock otherwise. + */ +static long (*bpf_timer_cancel)(struct bpf_timer *timer) = (void *) 172; + +/* + * bpf_get_func_ip + * + * Get address of the traced function (for tracing and kprobe programs). + * + * Returns + * Address of the traced function. + */ +static __u64 (*bpf_get_func_ip)(void *ctx) = (void *) 173; + +/* + * bpf_get_attach_cookie + * + * Get bpf_cookie value provided (optionally) during the program + * attachment. It might be different for each individual + * attachment, even if BPF program itself is the same. + * Expects BPF program context *ctx* as a first argument. + * + * Supported for the following program types: + * - kprobe/uprobe; + * - tracepoint; + * - perf_event. 
+ * + * Returns + * Value specified by user at BPF link creation/attachment time + * or 0, if it was not specified. + */ +static __u64 (*bpf_get_attach_cookie)(void *ctx) = (void *) 174; + +/* + * bpf_task_pt_regs + * + * Get the struct pt_regs associated with **task**. + * + * Returns + * A pointer to struct pt_regs. + */ +static long (*bpf_task_pt_regs)(struct task_struct *task) = (void *) 175; + +/* + * bpf_get_branch_snapshot + * + * Get branch trace from hardware engines like Intel LBR. The + * hardware engine is stopped shortly after the helper is + * called. Therefore, the user need to filter branch entries + * based on the actual use case. To capture branch trace + * before the trigger point of the BPF program, the helper + * should be called at the beginning of the BPF program. + * + * The data is stored as struct perf_branch_entry into output + * buffer *entries*. *size* is the size of *entries* in bytes. + * *flags* is reserved for now and must be zero. + * + * + * Returns + * On success, number of bytes written to *buf*. On error, a + * negative value. + * + * **-EINVAL** if *flags* is not zero. + * + * **-ENOENT** if architecture does not support branch records. + */ +static long (*bpf_get_branch_snapshot)(void *entries, __u32 size, __u64 flags) = (void *) 176; + +/* + * bpf_trace_vprintk + * + * Behaves like **bpf_trace_printk**\ () helper, but takes an array of u64 + * to format and can handle more format args as a result. + * + * Arguments are to be used as in **bpf_seq_printf**\ () helper. + * + * Returns + * The number of bytes written to the buffer, or a negative error + * in case of failure. + */ +static long (*bpf_trace_vprintk)(const char *fmt, __u32 fmt_size, const void *data, __u32 data_len) = (void *) 177; + +/* + * bpf_skc_to_unix_sock + * + * Dynamically cast a *sk* pointer to a *unix_sock* pointer. + * + * Returns + * *sk* if casting is valid, or **NULL** otherwise. + */ +static struct unix_sock *(*bpf_skc_to_unix_sock)(void *sk) = (void *) 178; + +/* + * bpf_kallsyms_lookup_name + * + * Get the address of a kernel symbol, returned in *res*. *res* is + * set to 0 if the symbol is not found. + * + * Returns + * On success, zero. On error, a negative value. + * + * **-EINVAL** if *flags* is not zero. + * + * **-EINVAL** if string *name* is not the same size as *name_sz*. + * + * **-ENOENT** if symbol is not found. + * + * **-EPERM** if caller does not have permission to obtain kernel address. + */ +static long (*bpf_kallsyms_lookup_name)(const char *name, int name_sz, int flags, __u64 *res) = (void *) 179; + +/* + * bpf_find_vma + * + * Find vma of *task* that contains *addr*, call *callback_fn* + * function with *task*, *vma*, and *callback_ctx*. + * The *callback_fn* should be a static function and + * the *callback_ctx* should be a pointer to the stack. + * The *flags* is used to control certain aspects of the helper. + * Currently, the *flags* must be 0. + * + * The expected callback signature is + * + * long (\*callback_fn)(struct task_struct \*task, struct vm_area_struct \*vma, void \*callback_ctx); + * + * + * Returns + * 0 on success. + * **-ENOENT** if *task->mm* is NULL, or no vma contains *addr*. + * **-EBUSY** if failed to try lock mmap_lock. + * **-EINVAL** for invalid **flags**. 
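+ *
+ * 	Editor's note: an illustrative sketch, not part of the upstream
+ * 	helper documentation, showing a callback with the expected
+ * 	signature; all names are assumptions::
+ *
+ * 		static long check_vma(struct task_struct *task,
+ * 				      struct vm_area_struct *vma,
+ * 				      void *callback_ctx)
+ * 		{
+ * 			// examine the vma that contains addr
+ * 			return 0;
+ * 		}
+ *
+ * 		// call site, with addr and ctx_data as assumed locals:
+ * 		bpf_find_vma(task, addr, check_vma, &ctx_data, 0);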
+ */ +static long (*bpf_find_vma)(struct task_struct *task, __u64 addr, void *callback_fn, void *callback_ctx, __u64 flags) = (void *) 180; + diff --git a/component/control/kern/bpf_helpers.h b/component/control/kern/bpf_helpers.h new file mode 100644 index 0000000..3b2a9c2 --- /dev/null +++ b/component/control/kern/bpf_helpers.h @@ -0,0 +1,263 @@ +// Copied from https://github.com/cilium/ebpf/blob/e0ada270a5/examples/headers/bpf_helpers.h +/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */ +#ifndef __BPF_HELPERS__ +#define __BPF_HELPERS__ + +/* + * Note that bpf programs need to include either + * vmlinux.h (auto-generated from BTF) or linux/types.h + * in advance since bpf_helper_defs.h uses such types + * as __u64. + */ +#include "bpf_helper_defs.h" + +#define __uint(name, val) int (*name)[val] +#define __type(name, val) typeof(val) *name +#define __array(name, val) typeof(val) *name[] + +/* + * Helper macro to place programs, maps, license in + * different sections in elf_bpf file. Section names + * are interpreted by libbpf depending on the context (BPF programs, BPF maps, + * extern variables, etc). + * To allow use of SEC() with externs (e.g., for extern .maps declarations), + * make sure __attribute__((unused)) doesn't trigger compilation warning. + */ +#define SEC(name) \ + _Pragma("GCC diagnostic push") \ + _Pragma("GCC diagnostic ignored \"-Wignored-attributes\"") \ + __attribute__((section(name), used)) \ + _Pragma("GCC diagnostic pop") \ + +/* Avoid 'linux/stddef.h' definition of '__always_inline'. */ +#undef __always_inline +#define __always_inline inline __attribute__((always_inline)) + +#ifndef __noinline +#define __noinline __attribute__((noinline)) +#endif +#ifndef __weak +#define __weak __attribute__((weak)) +#endif + +/* + * Use __hidden attribute to mark a non-static BPF subprogram effectively + * static for BPF verifier's verification algorithm purposes, allowing more + * extensive and permissive BPF verification process, taking into account + * subprogram's caller context. + */ +#define __hidden __attribute__((visibility("hidden"))) + +/* When utilizing vmlinux.h with BPF CO-RE, user BPF programs can't include + * any system-level headers (such as stddef.h, linux/version.h, etc), and + * commonly-used macros like NULL and KERNEL_VERSION aren't available through + * vmlinux.h. This just adds unnecessary hurdles and forces users to re-define + * them on their own. So as a convenience, provide such definitions here. + */ +#ifndef NULL +#define NULL ((void *)0) +#endif + +#ifndef KERNEL_VERSION +#define KERNEL_VERSION(a, b, c) (((a) << 16) + ((b) << 8) + ((c) > 255 ? 255 : (c))) +#endif + +/* + * Helper macros to manipulate data structures + */ +#ifndef offsetof +#define offsetof(TYPE, MEMBER) ((unsigned long)&((TYPE *)0)->MEMBER) +#endif +#ifndef container_of +#define container_of(ptr, type, member) \ + ({ \ + void *__mptr = (void *)(ptr); \ + ((type *)(__mptr - offsetof(type, member))); \ + }) +#endif + +/* + * Helper macro to throw a compilation error if __bpf_unreachable() gets + * built into the resulting code. This works given BPF back end does not + * implement __builtin_trap(). This is useful to assert that certain paths + * of the program code are never used and hence eliminated by the compiler. + * + * For example, consider a switch statement that covers known cases used by + * the program. __bpf_unreachable() can then reside in the default case. 
If + * the program gets extended such that a case is not covered in the switch + * statement, then it will throw a build error due to the default case not + * being compiled out. + */ +#ifndef __bpf_unreachable +# define __bpf_unreachable() __builtin_trap() +#endif + +/* + * Helper function to perform a tail call with a constant/immediate map slot. + */ +#if __clang_major__ >= 8 && defined(__bpf__) +static __always_inline void +bpf_tail_call_static(void *ctx, const void *map, const __u32 slot) +{ + if (!__builtin_constant_p(slot)) + __bpf_unreachable(); + + /* + * Provide a hard guarantee that LLVM won't optimize setting r2 (map + * pointer) and r3 (constant map index) from _different paths_ ending + * up at the _same_ call insn as otherwise we won't be able to use the + * jmpq/nopl retpoline-free patching by the x86-64 JIT in the kernel + * given they mismatch. See also d2e4c1e6c294 ("bpf: Constant map key + * tracking for prog array pokes") for details on verifier tracking. + * + * Note on clobber list: we need to stay in-line with BPF calling + * convention, so even if we don't end up using r0, r4, r5, we need + * to mark them as clobber so that LLVM doesn't end up using them + * before / after the call. + */ + asm volatile("r1 = %[ctx]\n\t" + "r2 = %[map]\n\t" + "r3 = %[slot]\n\t" + "call 12" + :: [ctx]"r"(ctx), [map]"r"(map), [slot]"i"(slot) + : "r0", "r1", "r2", "r3", "r4", "r5"); +} +#endif + +/* + * Helper structure used by eBPF C program + * to describe BPF map attributes to libbpf loader + */ +struct bpf_map_def { + unsigned int type; + unsigned int key_size; + unsigned int value_size; + unsigned int max_entries; + unsigned int map_flags; +}; + +enum libbpf_pin_type { + LIBBPF_PIN_NONE, + /* PIN_BY_NAME: pin maps by name (in /sys/fs/bpf by default) */ + LIBBPF_PIN_BY_NAME, +}; + +enum libbpf_tristate { + TRI_NO = 0, + TRI_YES = 1, + TRI_MODULE = 2, +}; + +#define __kconfig __attribute__((section(".kconfig"))) +#define __ksym __attribute__((section(".ksyms"))) + +#ifndef ___bpf_concat +#define ___bpf_concat(a, b) a ## b +#endif +#ifndef ___bpf_apply +#define ___bpf_apply(fn, n) ___bpf_concat(fn, n) +#endif +#ifndef ___bpf_nth +#define ___bpf_nth(_, _1, _2, _3, _4, _5, _6, _7, _8, _9, _a, _b, _c, N, ...) N +#endif +#ifndef ___bpf_narg +#define ___bpf_narg(...) \ + ___bpf_nth(_, ##__VA_ARGS__, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0) +#endif + +#define ___bpf_fill0(arr, p, x) do {} while (0) +#define ___bpf_fill1(arr, p, x) arr[p] = x +#define ___bpf_fill2(arr, p, x, args...) arr[p] = x; ___bpf_fill1(arr, p + 1, args) +#define ___bpf_fill3(arr, p, x, args...) arr[p] = x; ___bpf_fill2(arr, p + 1, args) +#define ___bpf_fill4(arr, p, x, args...) arr[p] = x; ___bpf_fill3(arr, p + 1, args) +#define ___bpf_fill5(arr, p, x, args...) arr[p] = x; ___bpf_fill4(arr, p + 1, args) +#define ___bpf_fill6(arr, p, x, args...) arr[p] = x; ___bpf_fill5(arr, p + 1, args) +#define ___bpf_fill7(arr, p, x, args...) arr[p] = x; ___bpf_fill6(arr, p + 1, args) +#define ___bpf_fill8(arr, p, x, args...) arr[p] = x; ___bpf_fill7(arr, p + 1, args) +#define ___bpf_fill9(arr, p, x, args...) arr[p] = x; ___bpf_fill8(arr, p + 1, args) +#define ___bpf_fill10(arr, p, x, args...) arr[p] = x; ___bpf_fill9(arr, p + 1, args) +#define ___bpf_fill11(arr, p, x, args...) arr[p] = x; ___bpf_fill10(arr, p + 1, args) +#define ___bpf_fill12(arr, p, x, args...) arr[p] = x; ___bpf_fill11(arr, p + 1, args) +#define ___bpf_fill(arr, args...) 
\ + ___bpf_apply(___bpf_fill, ___bpf_narg(args))(arr, 0, args) + +/* + * BPF_SEQ_PRINTF to wrap bpf_seq_printf to-be-printed values + * in a structure. + */ +#define BPF_SEQ_PRINTF(seq, fmt, args...) \ +({ \ + static const char ___fmt[] = fmt; \ + unsigned long long ___param[___bpf_narg(args)]; \ + \ + _Pragma("GCC diagnostic push") \ + _Pragma("GCC diagnostic ignored \"-Wint-conversion\"") \ + ___bpf_fill(___param, args); \ + _Pragma("GCC diagnostic pop") \ + \ + bpf_seq_printf(seq, ___fmt, sizeof(___fmt), \ + ___param, sizeof(___param)); \ +}) + +/* + * BPF_SNPRINTF wraps the bpf_snprintf helper with variadic arguments instead of + * an array of u64. + */ +#define BPF_SNPRINTF(out, out_size, fmt, args...) \ +({ \ + static const char ___fmt[] = fmt; \ + unsigned long long ___param[___bpf_narg(args)]; \ + \ + _Pragma("GCC diagnostic push") \ + _Pragma("GCC diagnostic ignored \"-Wint-conversion\"") \ + ___bpf_fill(___param, args); \ + _Pragma("GCC diagnostic pop") \ + \ + bpf_snprintf(out, out_size, ___fmt, \ + ___param, sizeof(___param)); \ +}) + +#ifdef BPF_NO_GLOBAL_DATA +#define BPF_PRINTK_FMT_MOD +#else +#define BPF_PRINTK_FMT_MOD static const +#endif + +#define __bpf_printk(fmt, ...) \ +({ \ + BPF_PRINTK_FMT_MOD char ____fmt[] = fmt; \ + bpf_trace_printk(____fmt, sizeof(____fmt), \ + ##__VA_ARGS__); \ +}) + +/* + * __bpf_vprintk wraps the bpf_trace_vprintk helper with variadic arguments + * instead of an array of u64. + */ +#define __bpf_vprintk(fmt, args...) \ +({ \ + static const char ___fmt[] = fmt; \ + unsigned long long ___param[___bpf_narg(args)]; \ + \ + _Pragma("GCC diagnostic push") \ + _Pragma("GCC diagnostic ignored \"-Wint-conversion\"") \ + ___bpf_fill(___param, args); \ + _Pragma("GCC diagnostic pop") \ + \ + bpf_trace_vprintk(___fmt, sizeof(___fmt), \ + ___param, sizeof(___param)); \ +}) + +/* Use __bpf_printk when bpf_printk call has 3 or fewer fmt args + * Otherwise use __bpf_vprintk + */ +#define ___bpf_pick_printk(...) \ + ___bpf_nth(_, ##__VA_ARGS__, __bpf_vprintk, __bpf_vprintk, __bpf_vprintk, \ + __bpf_vprintk, __bpf_vprintk, __bpf_vprintk, __bpf_vprintk, \ + __bpf_vprintk, __bpf_vprintk, __bpf_printk /*3*/, __bpf_printk /*2*/,\ + __bpf_printk /*1*/, __bpf_printk /*0*/) + +/* Helper macro to print out debug messages */ +#define bpf_printk(fmt, args...) ___bpf_pick_printk(args)(fmt, ##args) + +#endif \ No newline at end of file diff --git a/component/control/kern/tproxy.c b/component/control/kern/tproxy.c index a5604a6..7879214 100644 --- a/component/control/kern/tproxy.c +++ b/component/control/kern/tproxy.c @@ -1,30 +1,23 @@ +// +build ignore /* * SPDX-License-Identifier: AGPL-3.0-only * Copyright (c) since 2022, mzz2017 (mzz@tuta.io). All rights reserved. 
*/ - #include -#include #include #include #include #include #include #include -#include #include #include -#include #include -#include #include -#include -#include -#include -#include +#include "bpf_endian.h" +#include "bpf_helpers.h" -// #include "addr.h" // #define likely(x) x // #define unlikely(x) x @@ -41,12 +34,15 @@ #define MAX_PARAM_LEN 16 #define MAX_INTERFACE_NUM 128 -#define MAX_ROUTING_LEN 96 +#define MAX_ROUTING_LEN (32 * 3) #define MAX_LPM_SIZE 20480 +//#define MAX_LPM_SIZE 20480 #define MAX_LPM_NUM (MAX_ROUTING_LEN + 8) +#define MAX_DEST_MAPPING_NUM (65536 * 2) #define IPV6_MAX_EXTENSIONS 4 #define OUTBOUND_DIRECT 0 +#define OUTBOUND_BLOCK 1 #define OUTBOUND_CONTROL_PLANE_DIRECT 0xFE #define OUTBOUND_LOGICAL_AND 0xFF @@ -62,10 +58,6 @@ static const __u32 zero_key = 0; static const __u32 tproxy_port_key = 1; static const __u32 disable_l4_tx_checksum_key = 2; static const __u32 disable_l4_rx_checksum_key = 3; -static const __u32 epoch_key __attribute__((unused, deprecated)) = 4; -static const __u32 routings_len_key __attribute__((unused, deprecated)) = 5; - -static __be32 unspecific_ipv6[4] __attribute__((__unused__)) = {0, 0, 0, 0}; struct ip_port { __be32 ip[4]; @@ -96,7 +88,7 @@ struct { // enough for identifier. And UDP client // side does not care it (full-cone). __type(value, struct ip_port_outbound); // Original target. - __uint(max_entries, 0xFF << 2); + __uint(max_entries, MAX_DEST_MAPPING_NUM); /// NOTICE: It MUST be pinned. __uint(pinning, LIBBPF_PIN_BY_NAME); } dst_map SEC(".maps"); @@ -132,7 +124,7 @@ struct { __uint(max_entries, MAX_INTERFACE_NUM); /// NOTICE: No persistence. // __uint(pinning, LIBBPF_PIN_BY_NAME); -} ifindex_ip_map SEC(".maps"); +} ifindex_tproxy_ip_map SEC(".maps"); // Array of LPM tries: struct lpm_key { @@ -145,12 +137,12 @@ struct map_lpm_type { __uint(max_entries, MAX_LPM_SIZE); __uint(key_size, sizeof(struct lpm_key)); __uint(value_size, sizeof(__u32)); -} unused_lpm_type SEC(".maps"); +} unused_lpm_type SEC(".maps"), host_ip_lpm SEC(".maps"); struct { __uint(type, BPF_MAP_TYPE_ARRAY_OF_MAPS); __uint(key_size, sizeof(__u32)); __uint(max_entries, MAX_LPM_NUM); - __uint(pinning, LIBBPF_PIN_BY_NAME); + // __uint(pinning, LIBBPF_PIN_BY_NAME); __array(values, struct map_lpm_type); } lpm_array_map SEC(".maps"); @@ -199,16 +191,11 @@ struct { __type(key, __u32); __type(value, struct routing); __uint(max_entries, MAX_ROUTING_LEN); - __uint(pinning, LIBBPF_PIN_BY_NAME); + // __uint(pinning, LIBBPF_PIN_BY_NAME); } routing_map SEC(".maps"); struct domain_routing { __u32 bitmap[MAX_ROUTING_LEN / 32]; - /// DEPRECATED: Epoch is the epoch at the write time. Every time the control - /// plane restarts, epoch += 1. It was deprecated because long connection will - /// keep their states by persistent dst_map (we only need to know if it is a - /// old connection). - __u32 epoch; }; struct { __uint(type, BPF_MAP_TYPE_LRU_HASH); @@ -220,7 +207,7 @@ struct { // Functions: -static __always_inline bool equal_ipv6(__be32 x[4], __be32 y[4]) { +static __always_inline bool equal_ipv6_format(__be32 x[4], __be32 y[4]) { #if __clang_major__ >= 10 return ((__be64 *)x)[0] == ((__be64 *)y)[0] && ((__be64 *)x)[1] == ((__be64 *)y)[1]; @@ -248,7 +235,7 @@ static __always_inline long rewrite_ip(struct __sk_buff *skb, bool is_ipv6, __u8 proto, __u8 ihl, __be32 old_ip[4], __be32 new_ip[4], bool is_dest) { // Nothing to do. 
- if (equal_ipv6(old_ip, new_ip)) { + if (equal_ipv6_format(old_ip, new_ip)) { return 0; } // bpf_printk("%pI6->%pI6", old_ip, new_ip); @@ -486,28 +473,26 @@ parse_transport(struct __sk_buff *skb, struct ethhdr **ethh, struct iphdr **iph, } static __always_inline long ip_is_host(bool is_ipv6, __u32 ifindex, - __be32 ip[4], - __be32 (*first_interface_ip)[4]) { - struct if_ip *if_ip = bpf_map_lookup_elem(&ifindex_ip_map, &ifindex); - if (unlikely(!if_ip)) { - return -1; + __be32 ip[4], __be32 tproxy_ip[4]) { + if (tproxy_ip) { + struct if_ip *if_ip = bpf_map_lookup_elem(&ifindex_tproxy_ip_map, &ifindex); + if (unlikely(!if_ip)) { + return -1; + } + if (!is_ipv6 && (*if_ip).hasIp4) { + __builtin_memcpy(tproxy_ip, (*if_ip).ip4, IPV6_BYTE_LENGTH); + } else if (is_ipv6 && (*if_ip).hasIp6) { + __builtin_memcpy(tproxy_ip, (*if_ip).ip6, IPV6_BYTE_LENGTH); + } else { + // Should TC_ACT_OK outer. + return -EFAULT; + } } - __u32 host_ip[4]; - if (!is_ipv6 && (*if_ip).hasIp4) { - __builtin_memcpy(host_ip, (*if_ip).ip4, IPV6_BYTE_LENGTH); - } else if (is_ipv6 && (*if_ip).hasIp6) { - __builtin_memcpy(host_ip, (*if_ip).ip6, IPV6_BYTE_LENGTH); - } else { - // Should TC_ACT_OK outer. - return -EFAULT; - } - if (first_interface_ip) { - __builtin_memcpy(*first_interface_ip, host_ip, IPV6_BYTE_LENGTH); - } - if (equal_ipv6(ip, host_ip)) { - return 1; - } - return 0; + + struct lpm_key lpm_key; + lpm_key.trie_key.prefixlen = IPV6_BYTE_LENGTH * 8; + __builtin_memcpy(lpm_key.data, ip, IPV6_BYTE_LENGTH); + return bpf_map_lookup_elem(&host_ip_lpm, &lpm_key) ? 1 : 0; } static __always_inline long adjust_udp_len(struct __sk_buff *skb, __u16 oldlen, @@ -597,9 +582,9 @@ static __always_inline long encap_after_udp_hdr(struct __sk_buff *skb, __be16 iphdr_tot_len, void *newhdr, __u32 newhdrlen) { if (unlikely(newhdrlen % 4 != 0)) { - bpf_trace_printk("encap_after_udp_hdr: unexpected newhdrlen value %u :must " - "be a multiple of 4", - newhdrlen); + bpf_printk("encap_after_udp_hdr: unexpected newhdrlen value %u :must " + "be a multiple of 4", + newhdrlen); return -EINVAL; } @@ -664,10 +649,9 @@ static __always_inline int decap_after_udp_hdr(struct __sk_buff *skb, __be16 iphdr_tot_len, void *to, __u32 decap_hdrlen) { if (unlikely(decap_hdrlen % 4 != 0)) { - bpf_trace_printk( - "encap_after_udp_hdr: unexpected decap_hdrlen value %u :must " - "be a multiple of 4", - decap_hdrlen); + bpf_printk("encap_after_udp_hdr: unexpected decap_hdrlen value %u :must " + "be a multiple of 4", + decap_hdrlen); return -EINVAL; } long ret = 0; @@ -742,15 +726,7 @@ static long routing(__u8 flag[2], void *l4_hdr, __be32 saddr[4], __be32 daddr[4], __be32 mac[4]) { #define _l4proto flag[0] #define _ipversion flag[1] - // // Get len of routings and epoch from param_map. - // __u32 *routings_len = bpf_map_lookup_elem(¶m_map, &routings_len_key); - // if (!routings_len) { - // return -EINVAL; - // } - // __u32 *epoch = bpf_map_lookup_elem(¶m_map, &epoch_key); - // if (!epoch) { - // return -EINVAL; - // } + // Define variables for further use. __u16 h_dport; __u16 h_sport; @@ -786,13 +762,6 @@ static long routing(__u8 flag[2], void *l4_hdr, __be32 saddr[4], bool bad_rule = false; struct domain_routing *domain_routing; - /// DEPRECATED: Epoch was deprecated and domain_routing_map was unpinned, thus - /// this branch will never hit. - // if (domain_routing && domain_routing->epoch != *epoch) { - // // Dirty (epoch dismatch) traffic should be routed by the control plane. 
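The host_ip_lpm trie introduced above is keyed by a 32-bit prefix length followed by a 16-byte address in IPv6 form; ip_is_host performs an exact (/128) lookup, and the Go side (cidrToBpfLpmKey in routing_matcher_builder.go) has to produce the same layout. For illustration, a minimal Go sketch of that conversion, assuming this key layout; the real cidrToBpfLpmKey may differ in detail:

package main

import (
	"fmt"
	"net/netip"
)

// bpfLpmKey mirrors the assumed layout of struct lpm_key in tproxy.c:
// a 32-bit prefix length followed by a 16-byte address.
type bpfLpmKey struct {
	PrefixLen uint32
	Data      [16]byte
}

// toLpmKey converts a CIDR to the representation the eBPF program expects:
// IPv4 prefixes are expressed as IPv4-mapped IPv6 (::ffff:a.b.c.d), so their
// prefix length is shifted by 96 bits.
func toLpmKey(p netip.Prefix) bpfLpmKey {
	bits := uint32(p.Bits())
	if p.Addr().Is4() {
		bits += 96
	}
	return bpfLpmKey{PrefixLen: bits, Data: p.Addr().As16()}
}

func main() {
	fmt.Printf("%+v\n", toLpmKey(netip.MustParsePrefix("192.168.1.0/24"))) // PrefixLen:120
	fmt.Printf("%+v\n", toLpmKey(netip.MustParsePrefix("2001:db8::/32")))  // PrefixLen:32
}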
- // return OUTBOUND_CONTROL_PLANE_DIRECT; - // } - #pragma unroll for (__u32 key = 0; key < MAX_ROUTING_LEN; key++) { __u32 k = key; // Clone to pass code checker. @@ -895,6 +864,7 @@ int tproxy_ingress(struct __sk_buff *skb) { struct udphdr *udph; __sum16 bak_cksm; __u8 ihl; + bool tcp_state_syn; long ret = parse_transport(skb, ðh, &iph, &ipv6h, &tcph, &udph, &ihl); if (ret) { bpf_printk("parse_transport: %ld", ret); @@ -906,13 +876,7 @@ int tproxy_ingress(struct __sk_buff *skb) { // Backup for further use. __u8 l4_proto; - if (tcph) { - l4_proto = IPPROTO_TCP; - } else if (udph) { - l4_proto = IPPROTO_UDP; - } else { - return TC_ACT_OK; - } + __be16 ip_tot_len = 0; // Parse saddr and daddr as ipv6 format. __be32 saddr[4]; @@ -927,6 +891,8 @@ int tproxy_ingress(struct __sk_buff *skb) { daddr[1] = 0; daddr[2] = bpf_htonl(0x0000ffff); daddr[3] = iph->daddr; + + ip_tot_len = iph->tot_len; } else if (ipv6h) { __builtin_memcpy(daddr, &ipv6h->daddr, IPV6_BYTE_LENGTH); __builtin_memcpy(saddr, &ipv6h->saddr, IPV6_BYTE_LENGTH); @@ -935,8 +901,8 @@ int tproxy_ingress(struct __sk_buff *skb) { } // If this packet is sent to this host, accept it. - __u32 first_interface_ip[4]; - long to_host = ip_is_host(ipv6h, skb->ifindex, daddr, &first_interface_ip); + __u32 tproxy_ip[4]; + long to_host = ip_is_host(ipv6h, skb->ifindex, daddr, tproxy_ip); if (to_host < 0) { // error // bpf_printk("to_host: %ld", to_host); return TC_ACT_OK; @@ -946,7 +912,6 @@ int tproxy_ingress(struct __sk_buff *skb) { // To host:53. Process it. } else { // To host. Accept. - /// FIXME: all host ip. return TC_ACT_OK; } } @@ -958,8 +923,9 @@ int tproxy_ingress(struct __sk_buff *skb) { if (tcph) { // Backup for further use. + l4_proto = IPPROTO_TCP; bak_cksm = tcph->check; - bool tcp_state_syn = tcph->syn && !tcph->ack; + tcp_state_syn = tcph->syn && !tcph->ack; struct ip_port_proto key_src; __builtin_memset(&key_src, 0, sizeof(key_src)); __builtin_memcpy(key_src.ip, saddr, IPV6_BYTE_LENGTH); @@ -1003,6 +969,8 @@ int tproxy_ingress(struct __sk_buff *skb) { bpf_printk("tcp: outbound: %u, %pI6", outbound, daddr); if (outbound == OUTBOUND_DIRECT) { return TC_ACT_OK; + } else if (unlikely(outbound == OUTBOUND_BLOCK)) { + return TC_ACT_SHOT; } else { // Rewrite to control plane. @@ -1017,8 +985,8 @@ int tproxy_ingress(struct __sk_buff *skb) { __u32 *dst_ip = daddr; __u16 dst_port = tcph->dest; - if ((ret = rewrite_ip(skb, ipv6h, IPPROTO_TCP, ihl, dst_ip, - first_interface_ip, true))) { + if ((ret = rewrite_ip(skb, ipv6h, IPPROTO_TCP, ihl, dst_ip, tproxy_ip, + true))) { bpf_printk("Shot IP: %ld", ret); return TC_ACT_SHOT; } @@ -1031,6 +999,7 @@ int tproxy_ingress(struct __sk_buff *skb) { } else if (udph) { // Backup for further use. bak_cksm = udph->check; + l4_proto = IPPROTO_UDP; struct ip_port_outbound new_hdr; __builtin_memset(&new_hdr, 0, sizeof(new_hdr)); __builtin_memcpy(new_hdr.ip, daddr, IPV6_BYTE_LENGTH); @@ -1059,18 +1028,19 @@ int tproxy_ingress(struct __sk_buff *skb) { if (new_hdr.outbound == OUTBOUND_DIRECT) { return TC_ACT_OK; + } else if (unlikely(new_hdr.outbound == OUTBOUND_BLOCK)) { + return TC_ACT_SHOT; } else { // Rewrite to control plane. // Encap a header to transmit fullcone tuple. - __be16 ip_tot_len = iph ? iph->tot_len : 0; encap_after_udp_hdr(skb, ipv6h, ihl, ip_tot_len, &new_hdr, sizeof(new_hdr)); // Rewrite udp dst ip. 
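On the ingress UDP path above, encap_after_udp_hdr prepends an ip_port_outbound tuple to the payload before the packet is redirected to the control plane, which must strip it again before relaying. For illustration, a minimal Go sketch of decoding that tuple, assuming it is laid out as a 16-byte IPv6-mapped address, a 2-byte network-order port, a 1-byte outbound index and one padding byte (encap_after_udp_hdr requires the header length to be a multiple of 4); the exact layout is an assumption:

package main

import (
	"encoding/binary"
	"fmt"
	"net/netip"
)

// parseTuple decodes the tuple assumed to be prepended by encap_after_udp_hdr:
// 16-byte address, 2-byte big-endian port, 1-byte outbound, 1 byte padding.
func parseTuple(payload []byte) (addr netip.AddrPort, outbound uint8, rest []byte, err error) {
	const hdrLen = 20
	if len(payload) < hdrLen {
		return netip.AddrPort{}, 0, nil, fmt.Errorf("short packet: %d bytes", len(payload))
	}
	var ip [16]byte
	copy(ip[:], payload[:16])
	port := binary.BigEndian.Uint16(payload[16:18])
	outbound = payload[18]
	addr = netip.AddrPortFrom(netip.AddrFrom16(ip).Unmap(), port)
	return addr, outbound, payload[hdrLen:], nil
}

func main() {
	pkt := make([]byte, 24)
	copy(pkt, []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff, 1, 1, 1, 1}) // ::ffff:1.1.1.1
	binary.BigEndian.PutUint16(pkt[16:18], 53)
	addr, ob, rest, _ := parseTuple(pkt)
	fmt.Println(addr, ob, len(rest))
}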
// bpf_printk("rewrite dst ip from %pI4", &ori_dst.ip); - if ((ret = rewrite_ip(skb, ipv6h, IPPROTO_UDP, ihl, new_hdr.ip, - first_interface_ip, true))) { + if ((ret = rewrite_ip(skb, ipv6h, IPPROTO_UDP, ihl, new_hdr.ip, tproxy_ip, + true))) { bpf_printk("Shot IP: %ld", ret); return TC_ACT_SHOT; } @@ -1129,6 +1099,7 @@ int tproxy_egress(struct __sk_buff *skb) { struct tcphdr *tcph; struct udphdr *udph; __sum16 bak_cksm; + __u8 l4_proto; __u8 ihl; long ret = parse_transport(skb, ðh, &iph, &ipv6h, &tcph, &udph, &ihl); if (ret) { @@ -1138,6 +1109,7 @@ int tproxy_egress(struct __sk_buff *skb) { // Parse saddr and daddr as ipv6 format. __be32 saddr[4]; __be32 daddr[4]; + __be16 ip_tot_len = 0; if (iph) { saddr[0] = 0; saddr[1] = 0; @@ -1148,6 +1120,8 @@ int tproxy_egress(struct __sk_buff *skb) { daddr[1] = 0; daddr[2] = bpf_htonl(0x0000ffff); daddr[3] = iph->daddr; + + ip_tot_len = iph->tot_len; } else if (ipv6h) { __builtin_memcpy(daddr, ipv6h->daddr.in6_u.u6_addr32, IPV6_BYTE_LENGTH); __builtin_memcpy(saddr, ipv6h->saddr.in6_u.u6_addr32, IPV6_BYTE_LENGTH); @@ -1160,23 +1134,17 @@ int tproxy_egress(struct __sk_buff *skb) { if (!tproxy_port) { return TC_ACT_OK; } - long from_host = ip_is_host(ipv6h, skb->ifindex, saddr, NULL); - if (!(from_host == 1)) { - // Not from localhost. + __be32 tproxy_ip[4]; + ret = ip_is_host(ipv6h, skb->ifindex, saddr, tproxy_ip); + if (!(ret == 1)) { + return TC_ACT_OK; + } + if (!equal_ipv6_format(saddr, tproxy_ip)) { return TC_ACT_OK; } - // Backup for further use. - __u8 l4_proto; if (tcph) { l4_proto = IPPROTO_TCP; - } else if (udph) { - l4_proto = IPPROTO_UDP; - } else { - return TC_ACT_OK; - } - - if (tcph) { if (tcph->source != *tproxy_port) { return TC_ACT_OK; } @@ -1216,6 +1184,7 @@ int tproxy_egress(struct __sk_buff *skb) { return TC_ACT_SHOT; } } else if (udph) { + l4_proto = IPPROTO_UDP; if (udph->source != *tproxy_port) { return TC_ACT_OK; } @@ -1233,7 +1202,6 @@ int tproxy_egress(struct __sk_buff *skb) { // Get source ip/port from our packet header. // Decap header to get fullcone tuple. - __be16 ip_tot_len = iph ? 
iph->tot_len : 0; decap_after_udp_hdr(skb, ipv6h, ihl, ip_tot_len, &ori_src, sizeof(ori_src)); // Rewrite udp src ip @@ -1274,7 +1242,7 @@ int tproxy_egress(struct __sk_buff *skb) { if (*disable_l4_checksum == DISABLE_L4_CHECKSUM_POLICY_SET_ZERO) { bak_cksm = 0; } - bpf_skb_store_bytes(skb, l4_cksm_off, &bak_cksm, 2, 0); + bpf_skb_store_bytes(skb, l4_cksm_off, &bak_cksm, sizeof(bak_cksm), 0); } } return TC_ACT_OK; diff --git a/component/control/routing_matcher_builder.go b/component/control/routing_matcher_builder.go index 4228a36..8a160da 100644 --- a/component/control/routing_matcher_builder.go +++ b/component/control/routing_matcher_builder.go @@ -20,6 +20,7 @@ type DomainSet struct { RuleIndex int Domains []string } + type RoutingMatcherBuilder struct { *routing.DefaultMatcherBuilder outboundName2Id map[string]uint8 @@ -74,7 +75,7 @@ func (b *RoutingMatcherBuilder) AddDomain(key string, values []string, outbound }) } -func (b *RoutingMatcherBuilder) AddMac(macAddrs [][6]byte, outbound string) { +func (b *RoutingMatcherBuilder) AddSourceMac(macAddrs [][6]byte, outbound string) { if b.err != nil { return } @@ -108,7 +109,7 @@ func (b *RoutingMatcherBuilder) AddIp(values []netip.Prefix, outbound string) { }) } -func (b *RoutingMatcherBuilder) AddSource(values []netip.Prefix, outbound string) { +func (b *RoutingMatcherBuilder) AddSourceIp(values []netip.Prefix, outbound string) { if b.err != nil { return } @@ -166,9 +167,9 @@ func (b *RoutingMatcherBuilder) Build() (err error) { keys = append(keys, cidrToBpfLpmKey(cidr)) values = append(values, 1) } - m, err := b.bpf.NewLpmMap(keys, values) + m, err := b.bpf.newLpmMap(keys, values) if err != nil { - return fmt.Errorf("NewLpmMap: %w", err) + return fmt.Errorf("newLpmMap: %w", err) } // ebpf.Map cannot be BatchUpdate if err = b.bpf.LpmArrayMap.Update(uint32(i), m, ebpf.UpdateAny); err != nil { @@ -177,7 +178,12 @@ func (b *RoutingMatcherBuilder) Build() (err error) { } m.Close() } - // Update routings. + // Write routings. + // Final rule MUST be the last. + if b.rules[len(b.rules)-1].Type != uint8(consts.RoutingType_Final) { + b.err = fmt.Errorf("final rule MUST be the last") + return b.err + } routingsLen := uint32(len(b.rules)) routingsKeys := common.ARangeU32(routingsLen) if _, err = b.bpf.RoutingMap.BatchUpdate(routingsKeys, b.rules, &ebpf.BatchOptions{ @@ -185,8 +191,5 @@ func (b *RoutingMatcherBuilder) Build() (err error) { }); err != nil { return fmt.Errorf("BatchUpdate: %w", err) } - if err = b.bpf.ParamMap.Update(consts.RoutingsLenKey, routingsLen, ebpf.UpdateAny); err != nil { - return fmt.Errorf("Update: %w", err) - } return nil } diff --git a/component/outbound/dialer/block.go b/component/outbound/dialer/block.go new file mode 100644 index 0000000..781fbe8 --- /dev/null +++ b/component/outbound/dialer/block.go @@ -0,0 +1,21 @@ +/* + * SPDX-License-Identifier: AGPL-3.0-only + * Copyright (c) since 2023, mzz2017 (mzz@tuta.io). All rights reserved. 
+ */ + +package dialer + +import ( + "github.com/sirupsen/logrus" + "net" +) + +type blockDialer struct{} + +func (*blockDialer) Dial(network string, addr string) (c net.Conn, err error) { + return nil, net.ErrClosed +} + +func NewBlockDialer(log *logrus.Logger) *Dialer { + return newDialer(&blockDialer{}, log, true, "block", "block", "") +} diff --git a/component/outbound/dialer/dialer.go b/component/outbound/dialer/dialer.go index 266ecc1..ee284d4 100644 --- a/component/outbound/dialer/dialer.go +++ b/component/outbound/dialer/dialer.go @@ -24,6 +24,8 @@ var ( ) type Dialer struct { + log *logrus.Logger + proxy.Dialer supportUDP bool name string @@ -40,16 +42,17 @@ type Dialer struct { } // NewDialer is for register in general. -func NewDialer(dialer proxy.Dialer, supportUDP bool, name string, protocol string, link string) *Dialer { - d := newDialer(dialer, supportUDP, name, protocol, link) +func NewDialer(dialer proxy.Dialer, log *logrus.Logger, supportUDP bool, name string, protocol string, link string) *Dialer { + d := newDialer(dialer, log, supportUDP, name, protocol, link) go d.aliveBackground() return d } // newDialer does not run background tasks. -func newDialer(dialer proxy.Dialer, supportUDP bool, name string, protocol string, link string) *Dialer { +func newDialer(dialer proxy.Dialer, log *logrus.Logger, supportUDP bool, name string, protocol string, link string) *Dialer { d := &Dialer{ Dialer: dialer, + log: log, supportUDP: supportUDP, name: name, protocol: protocol, @@ -132,13 +135,13 @@ func (d *Dialer) Test(timeout time.Duration, url string) (ok bool, err error) { // No error. latency := time.Since(start) // FIXME: Use log instead of logrus. - logrus.Debugf("Connectivity Test <%v>: %v", d.name, latency) + d.log.Debugf("Connectivity Test <%v>: %v", d.name, latency) d.Latencies10.AppendLatency(latency) alive = true } else { // Append timeout if there is any error or unexpected status code. 
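Test now logs through the injected d.log instead of the global logrus instance. For illustration, a hypothetical caller that builds a dialer from a share-link and probes it once; the share-link and probe URL below are placeholders:

package main

import (
	"time"

	"github.com/sirupsen/logrus"
	_ "github.com/v2rayA/dae/component/outbound" // register link schemes (ss, ssr, socks5, trojan, ...)
	"github.com/v2rayA/dae/component/outbound/dialer"
)

func main() {
	log := logrus.New()
	d, err := dialer.NewFromLink(log, "socks5://127.0.0.1:1080#example")
	if err != nil {
		log.Fatalln(err)
	}
	// One-shot connectivity probe with a 5s timeout.
	ok, err := d.Test(5*time.Second, "https://connectivitycheck.gstatic.com/generate_204")
	log.Infof("alive=%v err=%v", ok, err)
}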
if err != nil { - logrus.Debugf("Connectivity Test <%v>: %v", d.name, err.Error()) + d.log.Debugf("Connectivity Test <%v>: %v", d.name, err.Error()) } d.Latencies10.AppendLatency(timeout) } diff --git a/component/outbound/dialer/direct.go b/component/outbound/dialer/direct.go index 1cdf6da..e62c283 100644 --- a/component/outbound/dialer/direct.go +++ b/component/outbound/dialer/direct.go @@ -1,15 +1,21 @@ package dialer import ( + "github.com/sirupsen/logrus" "golang.org/x/net/proxy" "net" ) -var SymmetricDirect = NewDirect(false) -var FullconeDirect = NewDirect(true) +var SymmetricDirect = newDirect(false) +var FullconeDirect = newDirect(true) -var SymmetricDirectDialer = newDialer(SymmetricDirect, true, "direct", "direct", "") -var FullconeDirectDialer = newDialer(FullconeDirect, true, "direct", "direct", "") +func NewDirectDialer(log *logrus.Logger, fullcone bool) *Dialer { + if fullcone { + return newDialer(FullconeDirect, log, true, "direct", "direct", "") + } else { + return newDialer(SymmetricDirect, log, true, "direct", "direct", "") + } +} type direct struct { proxy.Dialer @@ -17,7 +23,7 @@ type direct struct { fullCone bool } -func NewDirect(fullCone bool) proxy.Dialer { +func newDirect(fullCone bool) proxy.Dialer { return &direct{ netDialer: net.Dialer{}, fullCone: fullCone, diff --git a/component/outbound/dialer/http/http.go b/component/outbound/dialer/http/http.go index 9588eb2..25abbae 100644 --- a/component/outbound/dialer/http/http.go +++ b/component/outbound/dialer/http/http.go @@ -2,9 +2,9 @@ package http import ( "fmt" - "github.com/v2rayA/dae/component/outbound/dialer" "github.com/mzz2017/softwind/protocol/http" - "gopkg.in/yaml.v3" + "github.com/sirupsen/logrus" + "github.com/v2rayA/dae/component/outbound/dialer" "net" "net/url" "strconv" @@ -13,7 +13,6 @@ import ( func init() { dialer.FromLinkRegister("http", NewHTTP) dialer.FromLinkRegister("https", NewHTTP) - dialer.FromClashRegister("http", NewSocks5FromClashObj) } type HTTP struct { @@ -26,20 +25,12 @@ type HTTP struct { Protocol string `json:"protocol"` } -func NewHTTP(link string) (*dialer.Dialer, error) { +func NewHTTP(log *logrus.Logger, link string) (*dialer.Dialer, error) { s, err := ParseHTTPURL(link) if err != nil { return nil, fmt.Errorf("%w: %v", dialer.InvalidParameterErr, err) } - return s.Dialer() -} - -func NewSocks5FromClashObj(o *yaml.Node) (*dialer.Dialer, error) { - s, err := ParseClash(o) - if err != nil { - return nil, err - } - return s.Dialer() + return s.Dialer(log) } func ParseHTTPURL(link string) (data *HTTP, err error) { @@ -71,46 +62,13 @@ func ParseHTTPURL(link string) (data *HTTP, err error) { }, nil } -func ParseClash(o *yaml.Node) (data *HTTP, err error) { - type HttpOption struct { - Name string `yaml:"name"` - Server string `yaml:"server"` - Port int `yaml:"port"` - UserName string `yaml:"username,omitempty"` - Password string `yaml:"password,omitempty"` - TLS bool `yaml:"tls,omitempty"` - SNI string `yaml:"sni,omitempty"` - SkipCertVerify bool `yaml:"skip-cert-verify,omitempty"` - } - var option HttpOption - if err = o.Decode(&option); err != nil { - return nil, err - } - scheme := "http" - if option.TLS { - scheme = "https" - } - if option.SkipCertVerify { - return nil, fmt.Errorf("%w: skip-cert-verify=true", dialer.UnexpectedFieldErr) - } - return &HTTP{ - Name: option.Name, - Server: option.Server, - Port: option.Port, - Username: option.UserName, - Password: option.Password, - SNI: option.SNI, - Protocol: scheme, - }, nil -} - -func (s *HTTP) Dialer() (*dialer.Dialer, error) { 
+func (s *HTTP) Dialer(log *logrus.Logger) (*dialer.Dialer, error) { u := s.URL() - d, err := http.NewHTTPProxy(&u, dialer.SymmetricDirect) + d, err := http.NewHTTPProxy(&u, dialer.SymmetricDirect) // HTTP Proxy does not support full-cone. if err != nil { return nil, err } - return dialer.NewDialer(d, false, s.Name, s.Protocol, u.String()), nil + return dialer.NewDialer(d, log, false, s.Name, s.Protocol, u.String()), nil } func (s *HTTP) URL() url.URL { diff --git a/component/outbound/dialer/register.go b/component/outbound/dialer/register.go index 595e54e..3ba755e 100644 --- a/component/outbound/dialer/register.go +++ b/component/outbound/dialer/register.go @@ -7,11 +7,11 @@ package dialer import ( "fmt" - "gopkg.in/yaml.v3" + "github.com/sirupsen/logrus" "net/url" ) -type FromLinkCreator func(link string) (dialer *Dialer, err error) +type FromLinkCreator func(log *logrus.Logger, link string) (dialer *Dialer, err error) var fromLinkCreators = make(map[string]FromLinkCreator) @@ -19,35 +19,14 @@ func FromLinkRegister(name string, creator FromLinkCreator) { fromLinkCreators[name] = creator } -func NewFromLink(link string) (dialer *Dialer, err error) { +func NewFromLink(log *logrus.Logger, link string) (dialer *Dialer, err error) { u, err := url.Parse(link) if err != nil { return nil, err } if creator, ok := fromLinkCreators[u.Scheme]; ok { - return creator(link) + return creator(log, link) } else { return nil, fmt.Errorf("unexpected link type: %v", u.Scheme) } } - -type FromClashCreator func(clashObj *yaml.Node) (dialer *Dialer, err error) - -var fromClashCreators = make(map[string]FromClashCreator) - -func FromClashRegister(name string, creator FromClashCreator) { - fromClashCreators[name] = creator -} - -func NewFromClash(clashObj *yaml.Node) (dialer *Dialer, err error) { - preUnload := make(map[string]interface{}) - if err := clashObj.Decode(&preUnload); err != nil { - return nil, err - } - name, _ := preUnload["type"].(string) - if creator, ok := fromClashCreators[name]; ok { - return creator(clashObj) - } else { - return nil, fmt.Errorf("unexpected link type: %v", name) - } -} diff --git a/component/outbound/dialer/shadowsocks/shadowsocks.go b/component/outbound/dialer/shadowsocks/shadowsocks.go index 48a8ee8..d163c61 100644 --- a/component/outbound/dialer/shadowsocks/shadowsocks.go +++ b/component/outbound/dialer/shadowsocks/shadowsocks.go @@ -3,12 +3,12 @@ package shadowsocks import ( "encoding/base64" "fmt" - "github.com/v2rayA/dae/common" - "github.com/v2rayA/dae/component/outbound/dialer" - "github.com/v2rayA/dae/component/outbound/dialer/transport/simpleobfs" "github.com/mzz2017/softwind/protocol" "github.com/mzz2017/softwind/protocol/shadowsocks" - "gopkg.in/yaml.v3" + "github.com/sirupsen/logrus" + "github.com/v2rayA/dae/common" + "github.com/v2rayA/dae/component/outbound/dialer" + "github.com/v2rayA/dae/component/outbound/transport/simpleobfs" "net" "net/url" "strconv" @@ -21,7 +21,6 @@ func init() { dialer.FromLinkRegister("shadowsocks", NewShadowsocksFromLink) dialer.FromLinkRegister("ss", NewShadowsocksFromLink) - dialer.FromClashRegister("ss", NewShadowsocksFromClashObj) } type Shadowsocks struct { @@ -35,23 +34,15 @@ type Shadowsocks struct { Protocol string `json:"protocol"` } -func NewShadowsocksFromLink(link string) (*dialer.Dialer, error) { +func NewShadowsocksFromLink(log *logrus.Logger, link string) (*dialer.Dialer, error) { s, err := ParseSSURL(link) if err != nil { return nil, err } - return s.Dialer() + return s.Dialer(log) } -func NewShadowsocksFromClashObj(o 
*yaml.Node) (*dialer.Dialer, error) { - s, err := ParseClash(o) - if err != nil { - return nil, err - } - return s.Dialer() -} - -func (s *Shadowsocks) Dialer() (*dialer.Dialer, error) { +func (s *Shadowsocks) Dialer(log *logrus.Logger) (*dialer.Dialer, error) { // FIXME: support plain/none. switch s.Cipher { case "aes-256-gcm", "aes-128-gcm", "chacha20-poly1305", "chacha20-ietf-poly1305": @@ -59,7 +50,7 @@ func (s *Shadowsocks) Dialer() (*dialer.Dialer, error) { return nil, fmt.Errorf("unsupported shadowsocks encryption method: %v", s.Cipher) } supportUDP := s.UDP - d := dialer.SymmetricDirect + d := dialer.FullconeDirect // Shadowsocks Proxy supports full-cone. d, err := protocol.NewDialer("shadowsocks", d, protocol.Header{ ProxyAddress: net.JoinHostPort(s.Server, strconv.Itoa(s.Port)), Cipher: s.Cipher, @@ -86,46 +77,7 @@ func (s *Shadowsocks) Dialer() (*dialer.Dialer, error) { } supportUDP = false } - return dialer.NewDialer(d, supportUDP, s.Name, s.Protocol, s.ExportToURL()), nil -} - -func ParseClash(o *yaml.Node) (data *Shadowsocks, err error) { - type simpleObfsOption struct { - Mode string `obfs:"mode,omitempty"` - Host string `obfs:"host,omitempty"` - } - type ShadowSocksOption struct { - Name string `yaml:"name"` - Server string `yaml:"server"` - Port int `yaml:"port"` - Password string `yaml:"password"` - Cipher string `yaml:"cipher"` - UDP bool `yaml:"udp,omitempty"` - Plugin string `yaml:"plugin,omitempty"` - PluginOpts simpleObfsOption `yaml:"plugin-opts,omitempty"` - } - var option ShadowSocksOption - if err = o.Decode(&option); err != nil { - return nil, err - } - data = &Shadowsocks{ - Name: option.Name, - Server: option.Server, - Port: option.Port, - Password: option.Password, - Cipher: option.Cipher, - UDP: option.UDP, - Protocol: "shadowsocks", - } - if option.Plugin == "obfs" { - data.Plugin.Name = "simple-obfs" - data.Plugin.Opts.Obfs = option.PluginOpts.Mode - data.Plugin.Opts.Host = data.Plugin.Opts.Host - if data.Plugin.Opts.Host == "" { - data.Plugin.Opts.Host = "bing.com" - } - } - return data, nil + return dialer.NewDialer(d, log, supportUDP, s.Name, s.Protocol, s.ExportToURL()), nil } func ParseSSURL(u string) (data *Shadowsocks, err error) { diff --git a/component/outbound/dialer/shadowsocksr/shadowsocksr.go b/component/outbound/dialer/shadowsocksr/shadowsocksr.go index 5f64792..2063c01 100644 --- a/component/outbound/dialer/shadowsocksr/shadowsocksr.go +++ b/component/outbound/dialer/shadowsocksr/shadowsocksr.go @@ -3,10 +3,10 @@ package shadowsocksr import ( "encoding/base64" "fmt" + "github.com/sirupsen/logrus" "github.com/v2rayA/dae/common" "github.com/v2rayA/dae/component/outbound/dialer" ssr "github.com/v2rayA/shadowsocksR/client" - "gopkg.in/yaml.v3" "net" "net/url" "strconv" @@ -16,7 +16,6 @@ import ( func init() { dialer.FromLinkRegister("shadowsocksr", NewShadowsocksR) dialer.FromLinkRegister("ssr", NewShadowsocksR) - dialer.FromClashRegister("ssr", NewShadowsocksRFromClashObj) } type ShadowsocksR struct { @@ -32,23 +31,15 @@ type ShadowsocksR struct { Protocol string `json:"protocol"` } -func NewShadowsocksR(link string) (*dialer.Dialer, error) { +func NewShadowsocksR(log *logrus.Logger, link string) (*dialer.Dialer, error) { s, err := ParseSSRURL(link) if err != nil { return nil, err } - return s.Dialer() + return s.Dialer(log) } -func NewShadowsocksRFromClashObj(o *yaml.Node) (*dialer.Dialer, error) { - s, err := ParseClash(o) - if err != nil { - return nil, err - } - return s.Dialer() -} - -func (s *ShadowsocksR) Dialer() (*dialer.Dialer, error) 
{ +func (s *ShadowsocksR) Dialer(log *logrus.Logger) (*dialer.Dialer, error) { u := url.URL{ Scheme: "ssr", User: url.UserPassword(s.Cipher, s.Password), @@ -60,42 +51,11 @@ func (s *ShadowsocksR) Dialer() (*dialer.Dialer, error) { "obfs_param": []string{s.ObfsParam}, }.Encode(), } - d, err := ssr.NewSSR(u.String(), dialer.SymmetricDirect, nil) + d, err := ssr.NewSSR(u.String(), dialer.SymmetricDirect, nil) // SSR Proxy does not support full-cone. if err != nil { return nil, err } - return dialer.NewDialer(d, false, s.Name, s.Protocol, s.ExportToURL()), nil -} - -func ParseClash(o *yaml.Node) (data *ShadowsocksR, err error) { - type ShadowSocksROption struct { - Name string `yaml:"name"` - Server string `yaml:"server"` - Port int `yaml:"port"` - Password string `yaml:"password"` - Cipher string `yaml:"cipher"` - Obfs string `yaml:"obfs"` - ObfsParam string `yaml:"obfs-param,omitempty"` - Protocol string `yaml:"protocol"` - ProtocolParam string `yaml:"protocol-param,omitempty"` - UDP bool `yaml:"udp,omitempty"` - } - var option ShadowSocksROption - if err = o.Decode(&option); err != nil { - return nil, err - } - return &ShadowsocksR{ - Name: option.Name, - Server: option.Server, - Port: option.Port, - Password: option.Password, - Cipher: option.Cipher, - Proto: option.Protocol, - ProtoParam: option.ProtocolParam, - Obfs: option.Obfs, - ObfsParam: option.ObfsParam, - Protocol: "shadowsocksr", - }, nil + return dialer.NewDialer(d, log, false, s.Name, s.Protocol, s.ExportToURL()), nil } func ParseSSRURL(u string) (data *ShadowsocksR, err error) { diff --git a/component/outbound/dialer/socks/socks.go b/component/outbound/dialer/socks/socks.go index e3fe21f..0a8ffd1 100644 --- a/component/outbound/dialer/socks/socks.go +++ b/component/outbound/dialer/socks/socks.go @@ -2,10 +2,10 @@ package socks import ( "fmt" + "github.com/sirupsen/logrus" "github.com/v2rayA/dae/component/outbound/dialer" //"github.com/mzz2017/softwind/protocol/socks4" "github.com/mzz2017/softwind/protocol/socks5" - "gopkg.in/yaml.v3" "net" "net/url" "strconv" @@ -16,7 +16,6 @@ func init() { //dialer.FromLinkRegister("socks4", NewSocks) //dialer.FromLinkRegister("socks4a", NewSocks) dialer.FromLinkRegister("socks5", NewSocks) - dialer.FromClashRegister("socks5", NewSocks5FromClashObj) } type Socks struct { @@ -28,31 +27,23 @@ type Socks struct { Protocol string `json:"protocol"` } -func NewSocks(link string) (*dialer.Dialer, error) { +func NewSocks(log *logrus.Logger, link string) (*dialer.Dialer, error) { s, err := ParseSocksURL(link) if err != nil { return nil, dialer.InvalidParameterErr } - return s.Dialer() + return s.Dialer(log) } -func NewSocks5FromClashObj(o *yaml.Node) (*dialer.Dialer, error) { - s, err := ParseClashSocks5(o) - if err != nil { - return nil, err - } - return s.Dialer() -} - -func (s *Socks) Dialer() (*dialer.Dialer, error) { +func (s *Socks) Dialer(log *logrus.Logger) (*dialer.Dialer, error) { link := s.ExportToURL() switch s.Protocol { case "", "socks", "socks5": - d, err := socks5.NewSocks5Dialer(link, dialer.FullconeDirectDialer) + d, err := socks5.NewSocks5Dialer(link, dialer.FullconeDirect) // Socks5 Proxy supports full-cone. 
if err != nil { return nil, err } - return dialer.NewDialer(d, true, s.Name, s.Protocol, link), nil + return dialer.NewDialer(d, log, true, s.Name, s.Protocol, link), nil //case "socks4", "socks4a": // d, err := socks4.NewSocks4Dialer(link, &proxy.Direct{}) // if err != nil { @@ -64,37 +55,6 @@ func (s *Socks) Dialer() (*dialer.Dialer, error) { } } -func ParseClashSocks5(o *yaml.Node) (data *Socks, err error) { - type Socks5Option struct { - Name string `yaml:"name"` - Server string `yaml:"server"` - Port int `yaml:"port"` - UserName string `yaml:"username,omitempty"` - Password string `yaml:"password,omitempty"` - TLS bool `yaml:"tls,omitempty"` - UDP bool `yaml:"udp,omitempty"` - SkipCertVerify bool `yaml:"skip-cert-verify,omitempty"` - } - var option Socks5Option - if err = o.Decode(&option); err != nil { - return nil, err - } - if option.TLS { - return nil, fmt.Errorf("%w: tls=true", dialer.UnexpectedFieldErr) - } - if option.SkipCertVerify { - return nil, fmt.Errorf("%w: skip-cert-verify=true", dialer.UnexpectedFieldErr) - } - return &Socks{ - Name: option.Name, - Server: option.Server, - Port: option.Port, - Username: option.UserName, - Password: option.Password, - Protocol: "socks5", - }, nil -} - func ParseSocksURL(link string) (data *Socks, err error) { u, err := url.Parse(link) if err != nil { diff --git a/component/outbound/dialer/trojan/trojan.go b/component/outbound/dialer/trojan/trojan.go index 3cff464..12337e6 100644 --- a/component/outbound/dialer/trojan/trojan.go +++ b/component/outbound/dialer/trojan/trojan.go @@ -2,13 +2,13 @@ package trojan import ( "fmt" - "github.com/v2rayA/dae/common" - "github.com/v2rayA/dae/component/outbound/dialer" - "github.com/v2rayA/dae/component/outbound/dialer/transport/tls" - "github.com/v2rayA/dae/component/outbound/dialer/transport/ws" "github.com/mzz2017/softwind/protocol" "github.com/mzz2017/softwind/transport/grpc" - "gopkg.in/yaml.v3" + "github.com/sirupsen/logrus" + "github.com/v2rayA/dae/common" + "github.com/v2rayA/dae/component/outbound/dialer" + "github.com/v2rayA/dae/component/outbound/transport/tls" + "github.com/v2rayA/dae/component/outbound/transport/ws" "net" "net/url" "strconv" @@ -18,7 +18,6 @@ import ( func init() { dialer.FromLinkRegister("trojan", NewTrojan) dialer.FromLinkRegister("trojan-go", NewTrojan) - dialer.FromClashRegister("trojan", NewTrojanFromClashObj) } type Trojan struct { @@ -36,24 +35,16 @@ type Trojan struct { Protocol string `json:"protocol"` } -func NewTrojan(link string) (*dialer.Dialer, error) { +func NewTrojan(log *logrus.Logger, link string) (*dialer.Dialer, error) { s, err := ParseTrojanURL(link) if err != nil { return nil, err } - return s.Dialer() + return s.Dialer(log) } -func NewTrojanFromClashObj(o *yaml.Node) (*dialer.Dialer, error) { - s, err := ParseClash(o) - if err != nil { - return nil, err - } - return s.Dialer() -} - -func (s *Trojan) Dialer() (*dialer.Dialer, error) { - d := dialer.SymmetricDirect +func (s *Trojan) Dialer(log *logrus.Logger) (*dialer.Dialer, error) { + d := dialer.FullconeDirect // Trojan Proxy supports full-cone. 
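Every protocol package in this patch follows the same shape: an init() that registers its URL scheme(s) with dialer.FromLinkRegister, and a Dialer(log) method that wraps either dialer.SymmetricDirect or dialer.FullconeDirect (depending on whether the protocol relays full-cone UDP) with dialer.NewDialer. For illustration, a hypothetical new protocol package following that pattern; the scheme name "example" and its capabilities are made up:

// Package example is a hypothetical protocol package following the same
// registration pattern as the http/socks/trojan dialers in this patch.
package example

import (
	"github.com/sirupsen/logrus"
	"github.com/v2rayA/dae/component/outbound/dialer"
)

func init() {
	// Register the URL scheme so dialer.NewFromLink can construct this dialer.
	dialer.FromLinkRegister("example", NewExample)
}

// NewExample builds a *dialer.Dialer from a share-link.
func NewExample(log *logrus.Logger, link string) (*dialer.Dialer, error) {
	// A real implementation would parse the link and wrap a protocol dialer
	// around dialer.SymmetricDirect or dialer.FullconeDirect, depending on
	// whether the protocol can relay full-cone UDP.
	d := dialer.SymmetricDirect
	const supportUDP = false
	return dialer.NewDialer(d, log, supportUDP, "example", "example", link), nil
}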
u := url.URL{ Scheme: "tls", Host: net.JoinHostPort(s.Server, strconv.Itoa(s.Port)), @@ -111,7 +102,7 @@ func (s *Trojan) Dialer() (*dialer.Dialer, error) { }); err != nil { return nil, err } - return dialer.NewDialer(d, true, s.Name, s.Protocol, s.ExportToURL()), nil + return dialer.NewDialer(d, log, true, s.Name, s.Protocol, s.ExportToURL()), nil } func ParseTrojanURL(u string) (data *Trojan, err error) { @@ -160,53 +151,6 @@ func ParseTrojanURL(u string) (data *Trojan, err error) { return data, nil } -func ParseClash(o *yaml.Node) (data *Trojan, err error) { - type WSOptions struct { - Path string `yaml:"path,omitempty"` - Headers map[string]string `yaml:"headers,omitempty"` - MaxEarlyData int `yaml:"max-early-data,omitempty"` - EarlyDataHeaderName string `yaml:"early-data-header-name,omitempty"` - } - type GrpcOptions struct { - GrpcServiceName string `proxy:"grpc-service-name,omitempty"` - } - type TrojanOption struct { - Name string `yaml:"name"` - Server string `yaml:"server"` - Port int `yaml:"port"` - Password string `yaml:"password"` - ALPN []string `yaml:"alpn,omitempty"` - SNI string `yaml:"sni,omitempty"` - SkipCertVerify bool `yaml:"skip-cert-verify,omitempty"` - UDP bool `yaml:"udp,omitempty"` - Network string `yaml:"network,omitempty"` - GrpcOpts GrpcOptions `yaml:"grpc-opts,omitempty"` - WSOpts WSOptions `yaml:"ws-opts,omitempty"` - } - var option TrojanOption - if err = o.Decode(&option); err != nil { - return nil, err - } - proto := "trojan" - if option.Network != "" && option.Network != "origin" { - proto = "trojan-go" - } - return &Trojan{ - Name: option.Name, - Server: option.Server, - Port: option.Port, - Password: option.Password, - Sni: option.SNI, - Type: option.Network, - Encryption: "", - Host: option.WSOpts.Headers["Host"], - Path: option.WSOpts.Path, - AllowInsecure: option.SkipCertVerify, - ServiceName: option.GrpcOpts.GrpcServiceName, - Protocol: proto, - }, nil -} - func (t *Trojan) ExportToURL() string { u := &url.URL{ Scheme: "trojan", diff --git a/component/outbound/dialer/v2ray/v2ray.go b/component/outbound/dialer/v2ray/v2ray.go index e5b4b60..ef437d4 100644 --- a/component/outbound/dialer/v2ray/v2ray.go +++ b/component/outbound/dialer/v2ray/v2ray.go @@ -3,25 +3,24 @@ package v2ray import ( "encoding/base64" "fmt" - "github.com/v2rayA/dae/common" - "github.com/v2rayA/dae/component/outbound/dialer" - "github.com/v2rayA/dae/component/outbound/dialer/transport/tls" - "github.com/v2rayA/dae/component/outbound/dialer/transport/ws" jsoniter "github.com/json-iterator/go" "github.com/mzz2017/softwind/protocol" "github.com/mzz2017/softwind/transport/grpc" - "gopkg.in/yaml.v3" + "github.com/sirupsen/logrus" + "github.com/v2rayA/dae/common" + "github.com/v2rayA/dae/component/outbound/dialer" + "github.com/v2rayA/dae/component/outbound/transport/tls" + "github.com/v2rayA/dae/component/outbound/transport/ws" + "golang.org/x/net/proxy" "net" "net/url" "regexp" - "strconv" "strings" ) func init() { dialer.FromLinkRegister("vmess", NewV2Ray) dialer.FromLinkRegister("vless", NewV2Ray) - dialer.FromClashRegister("vmess", NewVMessFromClashObj) } type V2Ray struct { @@ -43,7 +42,7 @@ type V2Ray struct { Protocol string `json:"protocol"` } -func NewV2Ray(link string) (*dialer.Dialer, error) { +func NewV2Ray(log *logrus.Logger, link string) (*dialer.Dialer, error) { var ( s *V2Ray err error @@ -65,21 +64,19 @@ func NewV2Ray(link string) (*dialer.Dialer, error) { default: return nil, dialer.InvalidParameterErr } - return s.Dialer() + return s.Dialer(log) } -func 
NewVMessFromClashObj(o *yaml.Node) (*dialer.Dialer, error) { - s, err := ParseClashVMess(o) - if err != nil { - return nil, err +func (s *V2Ray) Dialer(log *logrus.Logger) (data *dialer.Dialer, err error) { + var d proxy.Dialer + switch s.Protocol { + case "vmess": + d = dialer.FullconeDirect // VMess Proxy supports full-cone. + case "vless": + d = dialer.SymmetricDirect // VLESS Proxy does not yet support full-cone by softwind. + default: + return nil, fmt.Errorf("V2Ray.Dialer: unexpected protocol: %v", s.Protocol) } - return s.Dialer() -} - -func (s *V2Ray) Dialer() (data *dialer.Dialer, err error) { - var ( - d = dialer.SymmetricDirect - ) switch strings.ToLower(s.Net) { case "ws": @@ -151,91 +148,9 @@ func (s *V2Ray) Dialer() (data *dialer.Dialer, err error) { }); err != nil { return nil, err } - return dialer.NewDialer(d, true, s.Ps, s.Protocol, s.ExportToURL()), nil + return dialer.NewDialer(d, log, true, s.Ps, s.Protocol, s.ExportToURL()), nil } -func ParseClashVMess(o *yaml.Node) (data *V2Ray, err error) { - type WSOptions struct { - Path string `yaml:"path,omitempty"` - Headers map[string]string `yaml:"headers,omitempty"` - MaxEarlyData int `yaml:"max-early-data,omitempty"` - EarlyDataHeaderName string `yaml:"early-data-header-name,omitempty"` - } - type GrpcOptions struct { - GrpcServiceName string `proxy:"grpc-service-name,omitempty"` - } - type HTTP2Options struct { - Host []string `proxy:"host,omitempty"` - Path string `proxy:"path,omitempty"` - } - type VmessOption struct { - Name string `yaml:"name"` - Server string `yaml:"server"` - Port int `yaml:"port"` - UUID string `yaml:"uuid"` - AlterID int `yaml:"alterId"` - Cipher string `yaml:"cipher"` - UDP bool `yaml:"udp,omitempty"` - Network string `yaml:"network,omitempty"` - TLS bool `yaml:"tls,omitempty"` - SkipCertVerify bool `yaml:"skip-cert-verify,omitempty"` - ServerName string `yaml:"servername,omitempty"` - HTTPOpts interface{} `yaml:"http-opts,omitempty"` - HTTP2Opts HTTP2Options `yaml:"h2-opts,omitempty"` - GrpcOpts GrpcOptions `yaml:"grpc-opts,omitempty"` - WSOpts WSOptions `yaml:"ws-opts,omitempty"` - } - var option VmessOption - if err = o.Decode(&option); err != nil { - return nil, err - } - - if option.Network == "" { - option.Network = "tcp" - } - var ( - path string - host string - alpn string - ) - switch option.Network { - case "ws": - path = option.WSOpts.Path - host = option.WSOpts.Headers["Host"] - alpn = "http/1.1" - case "grpc": - path = option.GrpcOpts.GrpcServiceName - case "h2": - host = strings.Join(option.HTTP2Opts.Host, ",") - path = option.HTTP2Opts.Path - alpn = "h2" - case "http": - // TODO - } - s := &V2Ray{ - Ps: option.Name, - Add: option.Server, - Port: strconv.Itoa(option.Port), - ID: option.UUID, - Aid: strconv.Itoa(option.AlterID), - Net: option.Network, - Type: "none", // FIXME - Host: host, - SNI: option.ServerName, - Path: path, - AllowInsecure: false, - Alpn: alpn, - V: "2", - Protocol: "vmess", - } - if option.SkipCertVerify { - return nil, fmt.Errorf("%w: skip-cert-verify=true", dialer.UnexpectedFieldErr) - } - if option.TLS { - s.TLS = "tls" - } - return s, nil -} func ParseVlessURL(vless string) (data *V2Ray, err error) { u, err := url.Parse(vless) if err != nil { diff --git a/component/outbound/dialer_group.go b/component/outbound/dialer_group.go index 63106cc..9527a1b 100644 --- a/component/outbound/dialer_group.go +++ b/component/outbound/dialer_group.go @@ -7,9 +7,9 @@ package outbound import ( "fmt" + "github.com/sirupsen/logrus" "github.com/v2rayA/dae/common/consts" 
"github.com/v2rayA/dae/component/outbound/dialer" - "github.com/sirupsen/logrus" "golang.org/x/net/proxy" "net" ) @@ -21,6 +21,7 @@ type DialerSelectionPolicy struct { type DialerGroup struct { proxy.Dialer + block *dialer.Dialer log *logrus.Logger Name string @@ -58,6 +59,7 @@ func NewDialerGroup(log *logrus.Logger, name string, dialers []*dialer.Dialer, p log: log, Name: name, Dialers: dialers, + block: dialer.NewBlockDialer(log), AliveDialerSet: a, registeredAliveDialerSet: registeredAliveDialerSet, selectionPolicy: &p, @@ -89,9 +91,8 @@ func (g *DialerGroup) Select() (*dialer.Dialer, error) { d := g.AliveDialerSet.GetRand() if d == nil { // No alive dialer. - // TODO: Should we throw an error to block the connection in this condition? - g.log.Warnf("No alive dialer in DialerGroup %v, use DIRECT. It may cause IP leaking.", g.Name) - return dialer.FullconeDirectDialer, nil + g.log.Warnf("No alive dialer in DialerGroup %v, use \"block\".", g.Name) + return g.block, nil } return d, nil @@ -105,9 +106,8 @@ func (g *DialerGroup) Select() (*dialer.Dialer, error) { d := g.AliveDialerSet.GetMinLatency() if d == nil { // No alive dialer. - // TODO: Should we throw an error to block the connection in this condition? - g.log.Warnf("No alive dialer in DialerGroup %v, use DIRECT. It may cause IP leaking.", g.Name) - return dialer.FullconeDirectDialer, nil + g.log.Warnf("No alive dialer in DialerGroup %v, use \"block\".", g.Name) + return g.block, nil } return d, nil diff --git a/component/outbound/dialer_group_test.go b/component/outbound/dialer_group_test.go index b8aa704..168aee8 100644 --- a/component/outbound/dialer_group_test.go +++ b/component/outbound/dialer_group_test.go @@ -6,10 +6,10 @@ package outbound import ( + "github.com/mzz2017/softwind/pkg/fastrand" "github.com/v2rayA/dae/common/consts" "github.com/v2rayA/dae/component/outbound/dialer" "github.com/v2rayA/dae/pkg/logger" - "github.com/mzz2017/softwind/pkg/fastrand" "testing" "time" ) @@ -17,8 +17,8 @@ import ( func TestDialerGroup_Select_Fixed(t *testing.T) { log := logger.NewLogger(2) dialers := []*dialer.Dialer{ - dialer.SymmetricDirectDialer, - dialer.FullconeDirectDialer, + dialer.NewDirectDialer(log, true), + dialer.NewDirectDialer(log, false), } fixedIndex := 1 g := NewDialerGroup(log, "test-group", dialers, DialerSelectionPolicy{ @@ -51,16 +51,16 @@ func TestDialerGroup_Select_Fixed(t *testing.T) { func TestDialerGroup_Select_MinLastLatency(t *testing.T) { log := logger.NewLogger(2) dialers := []*dialer.Dialer{ - dialer.NewDialer(dialer.SymmetricDirect, true, "direct", "direct", ""), - dialer.NewDialer(dialer.SymmetricDirect, true, "direct", "direct", ""), - dialer.NewDialer(dialer.SymmetricDirect, true, "direct", "direct", ""), - dialer.NewDialer(dialer.SymmetricDirect, true, "direct", "direct", ""), - dialer.NewDialer(dialer.SymmetricDirect, true, "direct", "direct", ""), - dialer.NewDialer(dialer.SymmetricDirect, true, "direct", "direct", ""), - dialer.NewDialer(dialer.SymmetricDirect, true, "direct", "direct", ""), - dialer.NewDialer(dialer.SymmetricDirect, true, "direct", "direct", ""), - dialer.NewDialer(dialer.SymmetricDirect, true, "direct", "direct", ""), - dialer.NewDialer(dialer.SymmetricDirect, true, "direct", "direct", ""), + dialer.NewDirectDialer(log, false), + dialer.NewDirectDialer(log, false), + dialer.NewDirectDialer(log, false), + dialer.NewDirectDialer(log, false), + dialer.NewDirectDialer(log, false), + dialer.NewDirectDialer(log, false), + dialer.NewDirectDialer(log, false), + dialer.NewDirectDialer(log, 
false), + dialer.NewDirectDialer(log, false), + dialer.NewDirectDialer(log, false), } g := NewDialerGroup(log, "test-group", dialers, DialerSelectionPolicy{ Policy: consts.DialerSelectionPolicy_MinLastLatency, @@ -114,11 +114,11 @@ func TestDialerGroup_Select_MinLastLatency(t *testing.T) { func TestDialerGroup_Select_Random(t *testing.T) { log := logger.NewLogger(2) dialers := []*dialer.Dialer{ - dialer.NewDialer(dialer.SymmetricDirect, true, "direct", "direct", ""), - dialer.NewDialer(dialer.SymmetricDirect, true, "direct", "direct", ""), - dialer.NewDialer(dialer.SymmetricDirect, true, "direct", "direct", ""), - dialer.NewDialer(dialer.SymmetricDirect, true, "direct", "direct", ""), - dialer.NewDialer(dialer.SymmetricDirect, true, "direct", "direct", ""), + dialer.NewDirectDialer(log, false), + dialer.NewDirectDialer(log, false), + dialer.NewDirectDialer(log, false), + dialer.NewDirectDialer(log, false), + dialer.NewDirectDialer(log, false), } g := NewDialerGroup(log, "test-group", dialers, DialerSelectionPolicy{ Policy: consts.DialerSelectionPolicy_Random, @@ -147,11 +147,11 @@ func TestDialerGroup_Select_Random(t *testing.T) { func TestDialerGroup_SetAlive(t *testing.T) { log := logger.NewLogger(2) dialers := []*dialer.Dialer{ - dialer.NewDialer(dialer.SymmetricDirect, true, "direct", "direct", ""), - dialer.NewDialer(dialer.SymmetricDirect, true, "direct", "direct", ""), - dialer.NewDialer(dialer.SymmetricDirect, true, "direct", "direct", ""), - dialer.NewDialer(dialer.SymmetricDirect, true, "direct", "direct", ""), - dialer.NewDialer(dialer.SymmetricDirect, true, "direct", "direct", ""), + dialer.NewDirectDialer(log, false), + dialer.NewDirectDialer(log, false), + dialer.NewDirectDialer(log, false), + dialer.NewDirectDialer(log, false), + dialer.NewDirectDialer(log, false), } g := NewDialerGroup(log, "test-group", dialers, DialerSelectionPolicy{ Policy: consts.DialerSelectionPolicy_Random, diff --git a/component/outbound/outbound.go b/component/outbound/outbound.go index 240f6ea..841a236 100644 --- a/component/outbound/outbound.go +++ b/component/outbound/outbound.go @@ -6,14 +6,14 @@ package outbound import ( + _ "github.com/mzz2017/softwind/protocol/shadowsocks" + _ "github.com/mzz2017/softwind/protocol/trojanc" + _ "github.com/mzz2017/softwind/protocol/vless" + _ "github.com/mzz2017/softwind/protocol/vmess" _ "github.com/v2rayA/dae/component/outbound/dialer/http" _ "github.com/v2rayA/dae/component/outbound/dialer/shadowsocks" _ "github.com/v2rayA/dae/component/outbound/dialer/shadowsocksr" _ "github.com/v2rayA/dae/component/outbound/dialer/socks" _ "github.com/v2rayA/dae/component/outbound/dialer/trojan" _ "github.com/v2rayA/dae/component/outbound/dialer/v2ray" - _ "github.com/mzz2017/softwind/protocol/shadowsocks" - _ "github.com/mzz2017/softwind/protocol/trojanc" - _ "github.com/mzz2017/softwind/protocol/vless" - _ "github.com/mzz2017/softwind/protocol/vmess" ) diff --git a/component/outbound/dialer/transport/simpleobfs/http.go b/component/outbound/transport/simpleobfs/http.go similarity index 100% rename from component/outbound/dialer/transport/simpleobfs/http.go rename to component/outbound/transport/simpleobfs/http.go diff --git a/component/outbound/dialer/transport/simpleobfs/simpleobfs.go b/component/outbound/transport/simpleobfs/simpleobfs.go similarity index 100% rename from component/outbound/dialer/transport/simpleobfs/simpleobfs.go rename to component/outbound/transport/simpleobfs/simpleobfs.go diff --git a/component/outbound/dialer/transport/simpleobfs/tls.go 
b/component/outbound/transport/simpleobfs/tls.go similarity index 100% rename from component/outbound/dialer/transport/simpleobfs/tls.go rename to component/outbound/transport/simpleobfs/tls.go diff --git a/component/outbound/dialer/transport/tls/tls.go b/component/outbound/transport/tls/tls.go similarity index 100% rename from component/outbound/dialer/transport/tls/tls.go rename to component/outbound/transport/tls/tls.go diff --git a/component/outbound/dialer/transport/ws/conn.go b/component/outbound/transport/ws/conn.go similarity index 100% rename from component/outbound/dialer/transport/ws/conn.go rename to component/outbound/transport/ws/conn.go diff --git a/component/outbound/dialer/transport/ws/ws.go b/component/outbound/transport/ws/ws.go similarity index 100% rename from component/outbound/dialer/transport/ws/ws.go rename to component/outbound/transport/ws/ws.go diff --git a/component/routing/matcher_builder.go b/component/routing/matcher_builder.go index 88b6b54..9bb253b 100644 --- a/component/routing/matcher_builder.go +++ b/component/routing/matcher_builder.go @@ -19,14 +19,14 @@ type MatcherBuilder interface { AddDomain(key string, values []string, outbound string) AddIp(values []netip.Prefix, outbound string) AddPort(values [][2]int, outbound string) - AddSource(values []netip.Prefix, outbound string) + AddSourceIp(values []netip.Prefix, outbound string) AddSourcePort(values [][2]int, outbound string) AddL4Proto(values consts.L4ProtoType, outbound string) AddIpVersion(values consts.IpVersion, outbound string) - AddMac(values [][6]byte, outbound string) + AddSourceMac(values [][6]byte, outbound string) AddFinal(outbound string) - AddAnyBefore(key string, values []string, outbound string) - AddAnyAfter(key string, values []string, outbound string) + AddAnyBefore(function string, key string, values []string, outbound string) + AddAnyAfter(function string, key string, values []string, outbound string) Build() (err error) } @@ -65,7 +65,7 @@ func ApplyMatcherBuilder(builder MatcherBuilder, rules []RoutingRule, finalOutbo if iFunc == len(rule.AndFunctions)-1 { outbound = rule.Outbound } - builder.AddAnyBefore(key, paramValueGroup, outbound) + builder.AddAnyBefore(f.Name, key, paramValueGroup, outbound) switch f.Name { case consts.Function_Domain: builder.AddDomain(key, paramValueGroup, outbound) @@ -77,7 +77,7 @@ func ApplyMatcherBuilder(builder MatcherBuilder, rules []RoutingRule, finalOutbo if f.Name == consts.Function_Ip { builder.AddIp(cidrs, outbound) } else { - builder.AddSource(cidrs, outbound) + builder.AddSourceIp(cidrs, outbound) } case consts.Function_Mac: var macAddrs [][6]byte @@ -88,7 +88,7 @@ func ApplyMatcherBuilder(builder MatcherBuilder, rules []RoutingRule, finalOutbo } macAddrs = append(macAddrs, mac) } - builder.AddMac(macAddrs, outbound) + builder.AddSourceMac(macAddrs, outbound) case consts.Function_Port, consts.Function_SourcePort: var portRanges [][2]int for _, v := range paramValueGroup { @@ -128,27 +128,29 @@ func ApplyMatcherBuilder(builder MatcherBuilder, rules []RoutingRule, finalOutbo default: return fmt.Errorf("unsupported function name: %v", f.Name) } - builder.AddAnyAfter(key, paramValueGroup, outbound) + builder.AddAnyAfter(f.Name, key, paramValueGroup, outbound) } } } - builder.AddAnyBefore("", nil, finalOutbound) + builder.AddAnyBefore("final", "", nil, finalOutbound) builder.AddFinal(finalOutbound) - builder.AddAnyAfter("", nil, finalOutbound) + builder.AddAnyAfter("final", "", nil, finalOutbound) return nil } type DefaultMatcherBuilder 
struct{} -func (d *DefaultMatcherBuilder) AddDomain(values []string, outbound string) {} -func (d *DefaultMatcherBuilder) AddIp(values []netip.Prefix, outbound string) {} -func (d *DefaultMatcherBuilder) AddPort(values [][2]int, outbound string) {} -func (d *DefaultMatcherBuilder) AddSource(values []netip.Prefix, outbound string) {} -func (d *DefaultMatcherBuilder) AddSourcePort(values [][2]int, outbound string) {} -func (d *DefaultMatcherBuilder) AddL4Proto(values consts.L4ProtoType, outbound string) {} -func (d *DefaultMatcherBuilder) AddIpVersion(values consts.IpVersion, outbound string) {} -func (d *DefaultMatcherBuilder) AddMac(values [][6]byte, outbound string) {} -func (d *DefaultMatcherBuilder) AddFinal(outbound string) {} -func (d *DefaultMatcherBuilder) AddAnyBefore(key string, values []string, outbound string) {} -func (d *DefaultMatcherBuilder) AddAnyAfter(key string, values []string, outbound string) {} -func (d *DefaultMatcherBuilder) Build() (err error) { return nil } +func (d *DefaultMatcherBuilder) AddDomain(values []string, outbound string) {} +func (d *DefaultMatcherBuilder) AddIp(values []netip.Prefix, outbound string) {} +func (d *DefaultMatcherBuilder) AddPort(values [][2]int, outbound string) {} +func (d *DefaultMatcherBuilder) AddSource(values []netip.Prefix, outbound string) {} +func (d *DefaultMatcherBuilder) AddSourcePort(values [][2]int, outbound string) {} +func (d *DefaultMatcherBuilder) AddL4Proto(values consts.L4ProtoType, outbound string) {} +func (d *DefaultMatcherBuilder) AddIpVersion(values consts.IpVersion, outbound string) {} +func (d *DefaultMatcherBuilder) AddMac(values [][6]byte, outbound string) {} +func (d *DefaultMatcherBuilder) AddFinal(outbound string) {} +func (d *DefaultMatcherBuilder) AddAnyBefore(function string, key string, values []string, outbound string) { +} +func (d *DefaultMatcherBuilder) AddAnyAfter(function string, key string, values []string, outbound string) { +} +func (d *DefaultMatcherBuilder) Build() (err error) { return nil } diff --git a/component/routing/routingA.go b/component/routing/routing.go similarity index 100% rename from component/routing/routingA.go rename to component/routing/routing.go diff --git a/component/routing/walker.go b/component/routing/walker.go index c283c70..bf0ea3b 100644 --- a/component/routing/walker.go +++ b/component/routing/walker.go @@ -184,7 +184,7 @@ func (s *RoutingAWalker) EnterDeclaration(ctx *routingA.DeclarationContext) { switch valueCtx := children[2].(type) { case *routingA.LiteralContext: value := getValueFromLiteral(valueCtx) - if key == "default" { + if key == consts.Declaration_Final { s.FinalOutbound = value } else { s.ReportError(ctx, ErrorType_Unsupported) @@ -201,6 +201,7 @@ func (s *RoutingAWalker) EnterDeclaration(ctx *routingA.DeclarationContext) { func (s *RoutingAWalker) EnterRoutingRule(ctx *routingA.RoutingRuleContext) { children := ctx.GetChildren() + //logrus.Debugln(ctx.GetText(), children) left, ok := children[0].(*routingA.RoutingRuleLeftContext) if !ok { s.ReportError(ctx, ErrorType_Unsupported, "not *RoutingRuleLeftContext: "+ctx.GetText()) diff --git a/config/bind.go b/config/bind.go new file mode 100644 index 0000000..cb5a47c --- /dev/null +++ b/config/bind.go @@ -0,0 +1,254 @@ +package config + +import ( + "fmt" + "github.com/spf13/viper" + "reflect" + "strconv" + "strings" +) + +var ( + ErrRequired = fmt.Errorf("required") + ErrMutualReference = fmt.Errorf("mutual reference or invalid value") + ErrOverlayHierarchicalKey = fmt.Errorf("overlay hierarchical 
key") +) + +type Binder struct { + viper *viper.Viper + toResolve map[string]string + resolved map[string]interface{} +} + +func MustGetMapKeys(m interface{}) (keys []string) { + v := reflect.ValueOf(m) + vKeys := v.MapKeys() + for _, k := range vKeys { + keys = append(keys, k.String()) + } + return keys +} + +func NewBinder(viper *viper.Viper) *Binder { + return &Binder{ + viper: viper, + toResolve: make(map[string]string), + resolved: make(map[string]interface{}), + } +} + +func (b *Binder) Bind(iface interface{}) error { + if err := b.bind(iface); err != nil { + return err + } + for len(b.toResolve) > 0 { + var changed bool + for key, expr := range b.toResolve { + ok, err := b.bindKey(key, expr) + if err != nil { + return err + } + if ok { + changed = true + if err := SetValueHierarchicalMap(b.resolved, key, b.viper.Get(key)); err != nil { + return fmt.Errorf("%w: %v", err, key) + } + delete(b.toResolve, key) + } + } + if !changed { + return fmt.Errorf("%v: %w", strings.Join(MustGetMapKeys(b.toResolve), ", "), ErrMutualReference) + } + } + return nil +} + +func (b *Binder) bind(iface interface{}, parts ...string) error { + // https://github.com/spf13/viper/issues/188 + ifv := reflect.ValueOf(iface) + ift := reflect.TypeOf(iface) +nextField: + for i := 0; i < ift.NumField(); i++ { + v := ifv.Field(i) + t := ift.Field(i) + tv, ok := t.Tag.Lookup("mapstructure") + if !ok { + continue + } + fields := strings.Split(tv, ",") + tv = fields[0] + switch v.Kind() { + case reflect.Struct: + if err := b.bind(v.Interface(), append(parts, tv)...); err != nil { + return err + } + default: + key := strings.Join(append(parts, tv), ".") + if b.viper.Get(key) == nil { + if defaultValue, ok := t.Tag.Lookup("default"); ok { + ok, err := b.bindKey(key, defaultValue) + if err != nil { + return err + } + if !ok { + b.toResolve[key] = defaultValue + continue nextField + } + } else if _, ok := t.Tag.Lookup("required"); ok { + if desc, ok := t.Tag.Lookup("desc"); ok { + key += " (" + desc + ")" + } + return fmt.Errorf("%w: %v", ErrRequired, key) + } else if len(fields) == 1 || fields[1] != "omitempty" { + // write an empty value + b.viper.Set(key, "") + } + } + if err := SetValueHierarchicalMap(b.resolved, key, b.viper.Get(key)); err != nil { + return fmt.Errorf("%w: %v", err, key) + } + } + } + return nil +} + +func (b *Binder) bindKey(key string, expr string) (ok bool, err error) { + b.viper.Set(key, expr) + return true, nil +} + +func SetValueHierarchicalMap(m map[string]interface{}, key string, val interface{}) error { + keys := strings.Split(key, ".") + lastKey := keys[len(keys)-1] + keys = keys[:len(keys)-1] + p := &m + for _, key := range keys { + if v, ok := (*p)[key]; ok { + vv, ok := v.(map[string]interface{}) + if !ok { + return ErrOverlayHierarchicalKey + } + p = &vv + } else { + (*p)[key] = make(map[string]interface{}) + vv := (*p)[key].(map[string]interface{}) + p = &vv + } + } + (*p)[lastKey] = val + return nil +} + +func SetValueHierarchicalStruct(m interface{}, key string, val string) error { + ifv, err := GetValueHierarchicalStruct(m, key) + if err != nil { + return err + } + if !FuzzyDecode(ifv.Addr().Interface(), val) { + return fmt.Errorf("type does not match: type \"%v\" and value \"%v\"", ifv.Kind(), val) + } + return nil +} + +func GetValueHierarchicalStruct(m interface{}, key string) (reflect.Value, error) { + keys := strings.Split(key, ".") + ifv := reflect.Indirect(reflect.ValueOf(m)) + ift := ifv.Type() + lastK := "" + for _, k := range keys { + found := false + if ift.Kind() == 
reflect.Struct { + for i := 0; i < ifv.NumField(); i++ { + name, ok := ift.Field(i).Tag.Lookup("mapstructure") + if ok && name == k { + found = true + ifv = ifv.Field(i) + ift = ifv.Type() + lastK = k + break + } + } + } + if !found { + return reflect.Value{}, fmt.Errorf(`unexpected key "%v": "%v" (%v type) has no member "%v"`, key, lastK, ift.Kind().String(), k) + } + } + return ifv, nil +} + +func FuzzyDecode(to interface{}, val string) bool { + v := reflect.Indirect(reflect.ValueOf(to)) + switch v.Kind() { + case reflect.Int: + i, err := strconv.ParseInt(val, 10, strconv.IntSize) + if err != nil { + return false + } + v.SetInt(i) + case reflect.Int8: + i, err := strconv.ParseInt(val, 10, 8) + if err != nil { + return false + } + v.SetInt(i) + case reflect.Int16: + i, err := strconv.ParseInt(val, 10, 16) + if err != nil { + return false + } + v.SetInt(i) + case reflect.Int32: + i, err := strconv.ParseInt(val, 10, 32) + if err != nil { + return false + } + v.SetInt(i) + case reflect.Int64: + i, err := strconv.ParseInt(val, 10, 64) + if err != nil { + return false + } + v.SetInt(i) + case reflect.Uint: + i, err := strconv.ParseUint(val, 10, strconv.IntSize) + if err != nil { + return false + } + v.SetUint(i) + case reflect.Uint8: + i, err := strconv.ParseUint(val, 10, 8) + if err != nil { + return false + } + v.SetUint(i) + case reflect.Uint16: + i, err := strconv.ParseUint(val, 10, 16) + if err != nil { + return false + } + v.SetUint(i) + case reflect.Uint32: + i, err := strconv.ParseUint(val, 10, 32) + if err != nil { + return false + } + v.SetUint(i) + case reflect.Uint64: + i, err := strconv.ParseUint(val, 10, 64) + if err != nil { + return false + } + v.SetUint(i) + case reflect.Bool: + if val == "true" || val == "1" { + v.SetBool(true) + } else if val == "false" || val == "0" { + v.SetBool(false) + } else { + return false + } + case reflect.String: + v.SetString(val) + } + return true +} diff --git a/config/config.go b/config/config.go new file mode 100644 index 0000000..a70e2d1 --- /dev/null +++ b/config/config.go @@ -0,0 +1,26 @@ +package config + +type Subscription struct { + Link string `mapstructure:"link"` + Select string `mapstructure:"select" default:"first"` + CacheLastNode bool `mapstructure:"cache_last_node" default:"true"` +} +type Cache struct { + Subscription CacheSubscription `mapstructure:"subscription"` +} +type CacheSubscription struct { + LastNode string `mapstructure:"last_node"` +} +type Params struct { + Node string `mapstructure:"node"` + Subscription Subscription `mapstructure:"subscription"` + + Cache Cache `mapstructure:"cache"` + + NoUDP bool `mapstructure:"no_udp"` + + TestNode bool `mapstructure:"test_node_before_use" default:"true"` + TestURL string `mapstructure:"test_url" default:"https://connectivitycheck.gstatic.com/generate_204"` +} + +var ParamsObj Params diff --git a/go.mod b/go.mod index f08cbc2..69421ea 100644 --- a/go.mod +++ b/go.mod @@ -5,20 +5,21 @@ go 1.19 require ( github.com/adrg/xdg v0.4.0 github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20221202181307-76fa05c21b12 - github.com/cilium/ebpf v0.9.3 + github.com/cilium/ebpf v0.10.0 github.com/gorilla/websocket v1.5.0 github.com/json-iterator/go v1.1.12 github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 github.com/mzz2017/softwind v0.0.0-20230124072602-aabc556ba332 github.com/sirupsen/logrus v1.9.0 - github.com/v2fly/v2ray-core/v5 v5.2.1 - github.com/v2rayA/RoutingA-dist/go/routingA v0.0.0-20230124054934-e2204d89186f + github.com/spf13/cobra v1.6.1 + github.com/spf13/viper 
v1.15.0 + github.com/v2rayA/RoutingA-dist/go/routingA v0.0.0-20230124191403-ef9c0530cb53 + github.com/v2rayA/dae-config-dist/go/dae_config v0.0.0-20230126180746-eae2d0e30c27 github.com/v2rayA/shadowsocksR v1.0.4 github.com/vishvananda/netlink v1.1.0 golang.org/x/net v0.5.0 golang.org/x/sys v0.4.0 google.golang.org/protobuf v1.28.1 - gopkg.in/yaml.v3 v3.0.1 ) require ( @@ -27,19 +28,36 @@ require ( github.com/dgryski/go-metro v0.0.0-20200812162917-85c65e2d0165 // indirect github.com/dgryski/go-rc2 v0.0.0-20150621095337-8a9021637152 // indirect github.com/eknkc/basex v1.0.1 // indirect + github.com/fsnotify/fsnotify v1.6.0 // indirect github.com/golang/protobuf v1.5.2 // indirect github.com/google/uuid v1.3.0 // indirect - github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421 // indirect + github.com/hashicorp/hcl v1.0.0 // indirect + github.com/inconshreveable/mousetrap v1.0.1 // indirect + github.com/magiconair/properties v1.8.7 // indirect + github.com/mitchellh/mapstructure v1.5.0 // indirect + github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect github.com/mzz2017/disk-bloom v1.0.1 // indirect + github.com/pelletier/go-toml/v2 v2.0.6 // indirect github.com/seiflotfy/cuckoofilter v0.0.0-20220411075957-e3b120b3f5fb // indirect + github.com/spf13/afero v1.9.3 // indirect + github.com/spf13/cast v1.5.0 // indirect + github.com/spf13/jwalterweatherman v1.1.0 // indirect + github.com/spf13/pflag v1.0.5 // indirect + github.com/subosito/gotenv v1.4.2 // indirect github.com/vishvananda/netns v0.0.0-20211101163701-50045581ed74 // indirect gitlab.com/yawning/chacha20.git v0.0.0-20190903091407-6d1cb28dc72c // indirect golang.org/x/crypto v0.5.0 // indirect golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e // indirect golang.org/x/text v0.6.0 // indirect - google.golang.org/genproto v0.0.0-20210828152312-66f60bf46e71 // indirect - google.golang.org/grpc v1.51.0 // indirect + google.golang.org/genproto v0.0.0-20221227171554-f9683d7f8bef // indirect + google.golang.org/grpc v1.52.0 // indirect + gopkg.in/ini.v1 v1.67.0 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect ) -//replace github.com/mzz2017/softwind => /home/mzz/goProjects/softwind +replace github.com/mzz2017/softwind => /home/mzz/goProjects/softwind + +//replace github.com/cilium/ebpf => /home/mzz/goProjects/ebpf +//replace github.com/v2rayA/RoutingA-dist/go/routingA => /home/mzz/antlrProjects/RoutingA-antlr4/build/go/routingA +//replace github.com/v2rayA/dae-config-dist/go/dae_config => /home/mzz/antlrProjects/dae-config/build/go/dae_config diff --git a/go.sum b/go.sum index 86cb5ef..262046f 100644 --- a/go.sum +++ b/go.sum @@ -1,20 +1,58 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= +cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= +cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= +cloud.google.com/go v0.44.3/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= +cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= +cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= +cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= +cloud.google.com/go v0.52.0/go.mod 
h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= +cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= +cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= +cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= +cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= +cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= +cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= +cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI= +cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= +cloud.google.com/go v0.75.0/go.mod h1:VGuuCn7PG0dwsd5XPVm2Mm3wlh3EL55/79EKB6hlPTY= +cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= +cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= +cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= +cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= +cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= +cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= +cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= +cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= +cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= +cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= +cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= +cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= +cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= +cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= +cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= +cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= +cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= +cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3fOKtUw0Xmo= +dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= +github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/adrg/xdg v0.4.0 h1:RzRqFcjH4nE5C6oTAxhBtoE2IRyjBSa62SCbyPidvls= github.com/adrg/xdg v0.4.0/go.mod h1:N6ag73EX4wyxeaoeHctc1mas01KZgsj5tYiAIwqJE/E= -github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20221202181307-76fa05c21b12 h1:npHgfD4Tl2WJS3AJaMUi5ynGDPUBfkg3U3fCzDyXZ+4= github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20221202181307-76fa05c21b12/go.mod h1:pSwJ0fSY5KhvocuWSx4fz3BA8OrA1bQn+K1Eli3BRwM= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/cespare/xxhash v1.1.0/go.mod 
h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= -github.com/cilium/ebpf v0.9.3 h1:5KtxXZU+scyERvkJMEm16TbScVvuuMrlhPly78ZMbSc= -github.com/cilium/ebpf v0.9.3/go.mod h1:w27N4UjpaQ9X/DGrSugxUG+H+NhgntDuPb5lCzxCn8A= +github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= +github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= +github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/cilium/ebpf v0.10.0 h1:nk5HPMeoBXtOzbkZBWym+ZWq1GIiHUsBFXxwewXAHLQ= +github.com/cilium/ebpf v0.10.0/go.mod h1:DPiVdY/kT534dgc9ERmvP8mWA+9gvwgKfRvk4nNWnoE= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= -github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -33,16 +71,32 @@ github.com/eknkc/basex v1.0.1/go.mod h1:k/F/exNEHFdbs3ZHuasoP2E7zeWwZblG84Y7Z59v github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po= github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= -github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/frankban/quicktest v1.14.0 h1:+cqqvzZV87b4adx/5ayVOaYZ2CrvM4ejQvUdBzPPUss= -github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/frankban/quicktest v1.14.4 h1:g2rn0vABPOOXmZUj+vbmUp0lPoXEMuhTpIluN0XL9UY= +github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY= +github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw= +github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= 
+github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= +github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= @@ -54,61 +108,122 @@ github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= +github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/martian/v3 v3.1.0/go.mod 
h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20201218002935-b9804c9f04c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= +github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= +github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g= github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc= github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= -github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= +github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= +github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= +github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/inconshreveable/mousetrap v1.0.1 h1:U3uMjPSQEBMNp1lFxmllqCPM6P5u/Xq7Pgzkat/bFNc= +github.com/inconshreveable/mousetrap v1.0.1/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= -github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= +github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= +github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/kr/fs v0.1.0/go.mod 
h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= -github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421 h1:ZqeYNhU3OHLH3mGKHDcjJRFFRrJa6eAM5H+CtDdOsPc= +github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY= +github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0= +github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= +github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 h1:RWengNIwukTxcDr9M+97sNutRR1RKhG96O6jWumTTnw= github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826/go.mod h1:TaXosZuwdSHYgviHp1DAtfrULt5eUgsSMsZf+YrPgl8= github.com/mzz2017/disk-bloom v1.0.1 h1:rEF9MiXd9qMW3ibRpqcerLXULoTgRlM21yqqJl1B90M= github.com/mzz2017/disk-bloom v1.0.1/go.mod h1:JLHETtUu44Z6iBmsqzkOtFlRvXSlKnxjwiBRDapizDI= -github.com/mzz2017/softwind v0.0.0-20230124072602-aabc556ba332 h1:wIHkkJcBo3ln0cdl607LAg+O8yj7oLQ3FIlBhLfWxUo= -github.com/mzz2017/softwind v0.0.0-20230124072602-aabc556ba332/go.mod h1:K1nXwtBokwEsfOfdT/5zV6R8QabGkyhcR0iuTrRZcYY= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= +github.com/pelletier/go-toml/v2 v2.0.6 h1:nrzqCb7j9cDFj2coyLNLaZuJTLjWjlaz6nvTvIwycIU= +github.com/pelletier/go-toml/v2 v2.0.6/go.mod h1:eumQOmlWiOPt5WriQQqoM5y18pDHwha2N+QD+EUNTek= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= -github.com/rogpeppe/go-internal v1.6.1 h1:/FiVV8dS/e+YqF2JvO3yXRFbBLTIuSDkuC7aBOAvL+k= +github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZVejAe8= +github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/seiflotfy/cuckoofilter v0.0.0-20220411075957-e3b120b3f5fb 
h1:XfLJSPIOUX+osiMraVgIrMR27uMXnRJWGm1+GL8/63U= github.com/seiflotfy/cuckoofilter v0.0.0-20220411075957-e3b120b3f5fb/go.mod h1:bR6DqgcAl1zTcOX8/pE2Qkj9XO00eCNqmKb7lXP8EAg= github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0= github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= -github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/spf13/afero v1.9.3 h1:41FoI0fD7OR7mGcKE/aOiLkGreyf8ifIOQmJANWogMk= +github.com/spf13/afero v1.9.3/go.mod h1:iUV7ddyEEZPO5gA3zD4fJt6iStLlL+Lg4m2cihcDf8Y= +github.com/spf13/cast v1.5.0 h1:rj3WzYc11XZaIZMPKmwP96zkFEnnAmV8s6XbB2aY32w= +github.com/spf13/cast v1.5.0/go.mod h1:SpXXQ5YoyJw6s3/6cMTQuxvgRl3PCJiyaX9p6b155UU= +github.com/spf13/cobra v1.6.1 h1:o94oiPyS4KD1mPy2fmcYYHHfCxLqYjJOhGsCHFZtEzA= +github.com/spf13/cobra v1.6.1/go.mod h1:IOw/AERYS7UzyrGinqmz6HLUo219MORXGxhbaJUqzrY= +github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk= +github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo= +github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= +github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/viper v1.15.0 h1:js3yy885G8xwJa6iOISGFwd+qlUo5AvyXb7CiihdtiU= +github.com/spf13/viper v1.15.0/go.mod h1:fFcTBJxvhhzSJiZy8n+PeW6t8l+KeT/uTARa0jHOQLA= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk= -github.com/v2fly/v2ray-core/v5 v5.2.1 h1:bWc0cxvzCbbcsIE7/C+NFeUytQJYWgjtcRMG242I2Rs= -github.com/v2fly/v2ray-core/v5 v5.2.1/go.mod h1:7MBSerlH+Wyq9Bvm90txc8Ey9C9MAwkM+ZtkMbKT0Zo= -github.com/v2rayA/RoutingA-dist/go/routingA v0.0.0-20230124054934-e2204d89186f h1:qmnamMCAaJbGw+XtQJfTGtL3gm7+7dYMY8neh+MTUkE= -github.com/v2rayA/RoutingA-dist/go/routingA v0.0.0-20230124054934-e2204d89186f/go.mod h1:BGhPbUF5jE2YhQiomx2F48N2lVNppcAlNCGfVhf5Lx4= +github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/subosito/gotenv v1.4.2 h1:X1TuBLAMDFbaTAChgCBLu3DU3UPyELpnF2jjJ2cz/S8= +github.com/subosito/gotenv v1.4.2/go.mod h1:ayKnFf/c6rvx/2iiLrJUk1e6plDbT3edrFNGqEflhK0= +github.com/v2rayA/RoutingA-dist/go/routingA v0.0.0-20230124191403-ef9c0530cb53 h1:seZfccOo70jrXD4LZAvM7AbW6IFXY2r230GgV7tsnro= +github.com/v2rayA/RoutingA-dist/go/routingA 
v0.0.0-20230124191403-ef9c0530cb53/go.mod h1:BGhPbUF5jE2YhQiomx2F48N2lVNppcAlNCGfVhf5Lx4= +github.com/v2rayA/dae-config-dist/go/dae_config v0.0.0-20230126180746-eae2d0e30c27 h1:IXDCb9A8PBdB+bBMVdCBP3c61DzUqLYQ4lb6tw8RXOo= +github.com/v2rayA/dae-config-dist/go/dae_config v0.0.0-20230126180746-eae2d0e30c27/go.mod h1:JiTWeZybOkBfCqv/fy5jbFhXTxuLlyrI76gRNazz2sU= github.com/v2rayA/shadowsocksR v1.0.4 h1:65Ltdy+I/DnlkQTJj+R+X85zhZ63ORE1Roy+agAcF/s= github.com/v2rayA/shadowsocksR v1.0.4/go.mod h1:CyOhDLy8/AKedsi16xRYAMmkxSCH1ukJPaacaTdRfQg= github.com/vishvananda/netlink v1.1.0 h1:1iyaYNBLmP6L0220aDnYQpo1QEV4t4hJ+xEEhhJH8j0= @@ -116,97 +231,306 @@ github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYp github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU= github.com/vishvananda/netns v0.0.0-20211101163701-50045581ed74 h1:gga7acRE695APm9hlsSMoOoE65U4/TcqNj90mc69Rlg= github.com/vishvananda/netns v0.0.0-20211101163701-50045581ed74/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0= -github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= gitlab.com/yawning/chacha20.git v0.0.0-20190903091407-6d1cb28dc72c h1:yrfrd1u7MWIwWIulet2TZPEkeNQhQ/GcPLdPXgiEEr0= gitlab.com/yawning/chacha20.git v0.0.0-20190903091407-6d1cb28dc72c/go.mod h1:3x6b94nWCP/a2XB/joOPMiGYUBvqbLfeY/BkHLeDs6s= -go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= +go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= +go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= +go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20201203163018-be400aefbc4c/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= +golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= +golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.5.0 h1:U/0M97KRkSFvyD/3FSmdP5W5swImpNgle/EHFhOsQPE= golang.org/x/crypto v0.5.0/go.mod h1:NK/OQwhpMQP3MwtdjgLlYHnH9ebylxKWv3e0fK+mkQU= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod 
h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= +golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= +golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= +golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= +golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e h1:+WEEuIdZHnUeJJmEUjyYC2gfUMj69yZXw17EnHg/otA= golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e/go.mod h1:Kr81I6Kryrl9sr8s2FK3vxD90NdsKWRuOIl2O4CvYbA= +golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= +golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= +golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= +golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= +golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= +golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= -golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.0/go.mod 
h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= +golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod 
h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.5.0 h1:GyT4nK/YDHSqa1c4753ouYCDajOYKTja9Xb/OHtgvSw= golang.org/x/net v0.5.0/go.mod h1:DivGGAXEgPSlEBzxGzZI+ZLohi+xUj054jfeKui00ws= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190606203320-7fc4e5ec1444/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190902133755-9109b7679e13/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200217220822-9197077df867/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201202213521-69691e467435/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.4.0 h1:Zr2JFtRQNX3BCZ8YtxRE9hNJYC8J6I1MVbMg6owUp18= golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.6.0 h1:3XmdazWV+ubf7QgHSTWeykHOci5oeekaGJBLkrkaw4k= golang.org/x/text v0.6.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod 
h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= +golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= +golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors 
v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= +google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= +google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= +google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= +google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= +google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE= +google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= +google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= +google.golang.org/genproto 
v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= +google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto v0.0.0-20210828152312-66f60bf46e71 h1:z+ErRPu0+KS02Td3fOAgdX+lnPDh/VyaABEJPD4JRQs= -google.golang.org/genproto v0.0.0-20210828152312-66f60bf46e71/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= +google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto 
v0.0.0-20221227171554-f9683d7f8bef h1:uQ2vjV/sHTsWSqdKeLqmwitzgvjMl7o4IdtHwUDXSJY= +google.golang.org/genproto v0.0.0-20221227171554-f9683d7f8bef/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= +google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= +google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= -google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= -google.golang.org/grpc v1.51.0 h1:E1eGv1FTqoLIdnBCZufiSHgKjlqG6fKFf6pPWtMTh8U= -google.golang.org/grpc v1.51.0/go.mod h1:wgNDFcnuBGmxLKI/qn4T+m5BtEBYXJPvibbUPsAIPww= +google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= +google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= +google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= +google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= +google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.52.0 h1:kd48UiU7EHsV4rnLyOJRuP/Il/UHE7gdDAQ+SZI7nZk= +google.golang.org/grpc v1.52.0/go.mod h1:pu6fVzoFb+NBYNAvQL08ic+lvB2IojljRYuun5vorUY= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -215,20 +539,31 @@ google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzi google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w= google.golang.org/protobuf v1.28.1/go.mod 
h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20200902074654-038fdea0a05b h1:QRR6H1YWRnHb4Y/HeNFCTJLFVxaq6wH4YuVdsUOr75U= gopkg.in/check.v1 v1.0.0-20200902074654-038fdea0a05b/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= +gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20200605160147-a5ece683394c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= +honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= +rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= +rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= diff --git a/insert.sh b/insert.sh index 9b344f1..83c13d8 100755 --- a/insert.sh +++ b/insert.sh @@ -5,11 +5,13 @@ sudo tc qdisc add dev $dev clsact > /dev/null 2>&1 set -ex -# clang -fno-stack-protector -O2 -g -emit-llvm -c component/control/kern/tproxy.c -o - | llc -march=bpf -mcpu=probe -filetype=obj -o foo.o +sudo rm -rf /sys/fs/bpf/tc/globals/* + +# clang -fno-stack-protector -O2 -g -emit-llvm -c component/control/kern/tproxy.c -o - | llc -march=bpf -mcpu=v3 -filetype=obj -o foo.o clang -O2 -g -Wall -Werror -c component/control/kern/tproxy.c -target bpf -o foo.o sudo tc filter del dev $dev ingress -sudo tc filter add dev $dev ingress bpf direct-action obj foo.o sec tc/ingress sudo tc filter del dev $dev egress +sudo tc filter add dev $dev ingress bpf direct-action obj foo.o sec tc/ingress sudo tc filter add dev $dev egress bpf direct-action obj foo.o sec tc/egress exit 0 \ No newline at end of file diff --git a/main.go b/main.go index 9ede86c..26a8923 100644 --- a/main.go +++ b/main.go @@ -8,51 +8,19 @@ package main import ( - "github.com/sirupsen/logrus" - "github.com/v2rayA/dae/component/control" - "github.com/v2rayA/dae/pkg/logger" + "github.com/json-iterator/go/extra" + "github.com/v2rayA/dae/cmd" + "net/http" "os" - "os/signal" - "syscall" + "time" ) func main() { - const ( - tproxyPort = 12345 - ifname = "docker0" - ) - 
logrus.SetLevel(logrus.DebugLevel) - log := logger.NewLogger(2) - log.Println("Running") - t, err := control.NewControlPlane(log, ` -default:proxy -#sip(172.17.0.2)->proxy -#mac("02:42:ac:11:00:02")->proxy -#ipversion(4)->proxy -#l4proto(tcp)->proxy -ip(119.29.29.29) -> proxy -ip(223.5.5.5) -> direct -ip(geoip:cn) -> direct -domain(geosite:cn, suffix:"ip.sb") -> direct -ip("91.105.192.0/23","91.108.4.0/22","91.108.8.0/21","91.108.16.0/21","91.108.56.0/22","95.161.64.0/20","149.154.160.0/20","185.76.151.0/24")->proxy -domain(geosite:category-scholar-!cn, geosite:category-scholar-cn)->direct -`) - if err != nil { - panic(err) - } - if err = t.BindLink(ifname); err != nil { - panic(err) - } - sigs := make(chan os.Signal, 1) - signal.Notify(sigs, syscall.SIGINT, syscall.SIGTERM, syscall.SIGHUP, syscall.SIGQUIT, syscall.SIGKILL, syscall.SIGILL) - go func() { - if err := t.ListenAndServe(tproxyPort); err != nil { - log.Errorln("ListenAndServe:", err) - sigs <- nil - } - }() - <-sigs - if e := t.Close(); e != nil { - log.Errorln("Close control plane:", err) + extra.RegisterFuzzyDecoders() + + http.DefaultClient.Timeout = 30 * time.Second + if err := cmd.Execute(); err != nil { + os.Exit(1) } + os.Exit(0) } diff --git a/pkg/config_parser/config_parser.go b/pkg/config_parser/config_parser.go new file mode 100644 index 0000000..bbf89e5 --- /dev/null +++ b/pkg/config_parser/config_parser.go @@ -0,0 +1,34 @@ +/* + * SPDX-License-Identifier: AGPL-3.0-only + * Copyright (c) since 2023, mzz2017 (mzz@tuta.io). All rights reserved. + */ + +package config_parser + +import ( + "fmt" + "github.com/antlr/antlr4/runtime/Go/antlr/v4" + "github.com/v2rayA/dae-config-dist/go/dae_config" +) + +func Parse(in string) (sections []*Section, err error) { + errorListener := NewConsoleErrorListener() + lexer := dae_config.Newdae_configLexer(antlr.NewInputStream(in)) + lexer.RemoveErrorListeners() + lexer.AddErrorListener(errorListener) + input := antlr.NewCommonTokenStream(lexer, 0) + + parser := dae_config.Newdae_configParser(input) + parser.RemoveErrorListeners() + parser.AddErrorListener(errorListener) + parser.BuildParseTrees = true + tree := parser.Start() + + walker := NewWalker(parser) + antlr.ParseTreeWalkerDefault.Walk(walker, tree) + if errorListener.ErrorBuilder.Len() != 0 { + return nil, fmt.Errorf("%v", errorListener.ErrorBuilder.String()) + } + + return walker.Sections, nil +} diff --git a/pkg/config_parser/config_parser_test.go b/pkg/config_parser/config_parser_test.go new file mode 100644 index 0000000..e5af8c1 --- /dev/null +++ b/pkg/config_parser/config_parser_test.go @@ -0,0 +1,91 @@ +/* + * SPDX-License-Identifier: AGPL-3.0-only + * Copyright (c) since 2023, mzz2017 (mzz@tuta.io). All rights reserved. + */ + +package config_parser + +import "testing" + +func TestParse(t *testing.T) { + sections, err := Parse(` +# gugu +include { + another.conf +} + +global { + # tproxy port to listen. + tproxy_port: 12345 + + # Node connectivity check url. + check_url: 'https://connectivitycheck.gstatic.com/generate_204' + + # Now only support UDP and IP:Port. + # Please make sure DNS traffic will go through and be forwarded by dae. + dns_upstream: '1.1.1.1:53' + + # Now only support one interface. + ingress_interface: docker0 +} + +# subscription will be resolved as nodes and merged into node pool below. 
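+# The 'LINK' placeholders below stand for real share or subscription URLs; groups defined later pick nodes out of this merged pool with their filters.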
+subscription { + https://LINK +} + +node { + 'ss://LINK' + 'ssr://LINK' + 'vmess://LINK' + 'vless://LINK' + 'trojan://LINK' + 'trojan-go://LINK' + 'socks5://LINK#name' + 'http://LINK#name' + 'https://LINK#name' +} + +group { + my_group { + # Pass node links as input of lua script filter. + # gugu + filter: link(lua:filename.lua) + + # Randomly select a node from the group for every connection. + policy: random + } + + disney { + # Pass node names as input of keyword/regex filter. + filter: name(regex:'^.*hk.*$', keyword:'sg') && name(keyword:'disney') + + # Select the node with min average of the last 10 latencies from the group for every connection. + policy: min_avg10 + } + + netflix { + # Pass node names as input of keyword filter. + filter: name(keyword:netflix) + + # Select the first node from the group for every connection. + policy: fixed(0) + } +} + +routing { + domain(geosite:category-ads) -> block + domain(geosite:disney) -> disney + domain(geosite:netflix) -> netflix + ip(geoip:cn) -> direct + domain(geosite:cn) -> direct + final: my_group +} +`) + if err != nil { + t.Fatalf("\n%v", err) + } + for _, section := range sections { + t.Logf("\n%v", section.String()) + } +} diff --git a/pkg/config_parser/error.go b/pkg/config_parser/error.go new file mode 100644 index 0000000..b73832f --- /dev/null +++ b/pkg/config_parser/error.go @@ -0,0 +1,91 @@ +/* + * SPDX-License-Identifier: AGPL-3.0-only + * Copyright (c) since 2022, mzz2017 (mzz@tuta.io). All rights reserved. + */ + +package config_parser + +import ( + "fmt" + "github.com/antlr/antlr4/runtime/Go/antlr/v4" + "reflect" + "strings" +) + +type ErrorType string + +const ( + ErrorType_Unsupported ErrorType = "is not supported" + ErrorType_NotSet ErrorType = "is not set" +) + +type ConsoleErrorListener struct { + ErrorBuilder strings.Builder +} + +func NewConsoleErrorListener() *ConsoleErrorListener { + return &ConsoleErrorListener{} +} + +func (d *ConsoleErrorListener) SyntaxError(recognizer antlr.Recognizer, offendingSymbol interface{}, line, column int, msg string, e antlr.RecognitionException) { + // Do not accumulate errors. 
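+ // Only the first syntax error is recorded; later errors are usually
+ // cascades of the first and would only obscure the root cause.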
+ if d.ErrorBuilder.Len() > 0 { + return + } + backtrack := column + if backtrack > 30 { + backtrack = 30 + } + starting := fmt.Sprintf("line %v:%v ", line, column) + offset := len(starting) + backtrack + var ( + simplyWrite bool + token antlr.Token + ) + if offendingSymbol == nil { + simplyWrite = true + } else { + token = offendingSymbol.(antlr.Token) + simplyWrite = token.GetTokenType() == -1 + } + if simplyWrite { + d.ErrorBuilder.WriteString(fmt.Sprintf("%v%v", starting, msg)) + return + } + + beginOfLine := token.GetStart() - backtrack + strPeek := token.GetInputStream().GetText(beginOfLine, token.GetStop()+30) + wrap := strings.IndexByte(strPeek, '\n') + if wrap == -1 { + wrap = token.GetStop() + 30 + } else { + wrap += beginOfLine - 1 + } + strLine := token.GetInputStream().GetText(beginOfLine, wrap) + d.ErrorBuilder.WriteString(fmt.Sprintf("%v%v\n%v%v: %v\n", starting, strLine, strings.Repeat(" ", offset), strings.Repeat("^", token.GetStop()-token.GetStart()+1), msg)) +} +func (d *ConsoleErrorListener) ReportAmbiguity(recognizer antlr.Parser, dfa *antlr.DFA, startIndex, stopIndex int, exact bool, ambigAlts *antlr.BitSet, configs antlr.ATNConfigSet) { +} + +func (d *ConsoleErrorListener) ReportAttemptingFullContext(recognizer antlr.Parser, dfa *antlr.DFA, startIndex, stopIndex int, conflictingAlts *antlr.BitSet, configs antlr.ATNConfigSet) { +} + +func (d *ConsoleErrorListener) ReportContextSensitivity(recognizer antlr.Parser, dfa *antlr.DFA, startIndex, stopIndex, prediction int, configs antlr.ATNConfigSet) { +} + +func BaseContext(ctx interface{}) (baseCtx *antlr.BaseParserRuleContext) { + val := reflect.ValueOf(ctx) + for val.Kind() == reflect.Pointer && val.Type() != reflect.TypeOf(&antlr.BaseParserRuleContext{}) { + val = val.Elem() + } + if val.Type() == reflect.TypeOf(&antlr.BaseParserRuleContext{}) { + baseCtx = val.Interface().(*antlr.BaseParserRuleContext) + } else { + baseCtxVal := val.FieldByName("BaseParserRuleContext") + if !baseCtxVal.IsValid() { + panic("has no field BaseParserRuleContext") + } + baseCtx = baseCtxVal.Interface().(*antlr.BaseParserRuleContext) + } + return baseCtx +} diff --git a/pkg/config_parser/section.go b/pkg/config_parser/section.go new file mode 100644 index 0000000..95a4f89 --- /dev/null +++ b/pkg/config_parser/section.go @@ -0,0 +1,187 @@ +/* + * SPDX-License-Identifier: AGPL-3.0-only + * Copyright (c) since 2023, mzz2017 (mzz@tuta.io). All rights reserved. 
+ */ + +package config_parser + +import ( + "fmt" + "strconv" + "strings" +) + +type ItemType int + +const ( + ItemType_RoutingRule ItemType = iota + ItemType_Param + ItemType_Section +) + +func (t ItemType) String() string { + switch t { + case ItemType_RoutingRule: + return "RoutingRule" + case ItemType_Param: + return "Param" + case ItemType_Section: + return "Section" + default: + return "" + } +} + +func NewRoutingRuleItem(rule *RoutingRule) *Item { + return &Item{ + Type: ItemType_RoutingRule, + Value: rule, + } +} + +func NewParamItem(param *Param) *Item { + return &Item{ + Type: ItemType_Param, + Value: param, + } +} + +func NewSectionItem(section *Section) *Item { + return &Item{ + Type: ItemType_Param, + Value: section, + } +} + +type Item struct { + Type ItemType + Value interface{} +} + +func (i *Item) String() string { + var builder strings.Builder + builder.WriteString("type: " + i.Type.String() + "\n") + var content string + switch val := i.Value.(type) { + case *RoutingRule: + content = val.String(false) + case *Param: + content = val.String() + case *Section: + content = val.String() + default: + return "\n" + } + lines := strings.Split(content, "\n") + for i := range lines { + lines[i] = "\t" + lines[i] + } + builder.WriteString(strings.Join(lines, "\n")) + return builder.String() +} + +type Section struct { + Name string + Items []*Item +} + +func (s *Section) String() string { + var builder strings.Builder + builder.WriteString("section: " + s.Name + "\n") + var strItemList []string + for _, item := range s.Items { + lines := strings.Split(item.String(), "\n") + for i := range lines { + lines[i] = "\t" + lines[i] + } + strItemList = append(strItemList, strings.Join(lines, "\n")) + } + builder.WriteString(strings.Join(strItemList, "\n")) + return builder.String() +} + +type Param struct { + Key string + Val string + AndFunctions []*Function +} + +func (p *Param) String() string { + if p.Key == "" { + return p.Val + } + if p.AndFunctions != nil { + a := paramAndFunctions{ + Key: p.Key, + AndFunctions: p.AndFunctions, + } + return a.String() + } + return p.Key + ": " + p.Val +} + +type Function struct { + Name string + Params []*Param +} + +func (f *Function) String() string { + var builder strings.Builder + builder.WriteString(f.Name + "(") + var strParamList []string + for _, p := range f.Params { + strParamList = append(strParamList, p.String()) + } + builder.WriteString(strings.Join(strParamList, ", ")) + builder.WriteString(")") + return builder.String() +} + +type paramAndFunctions struct { + Key string + AndFunctions []*Function +} + +func (p *paramAndFunctions) String() string { + var builder strings.Builder + builder.WriteString(p.Key + ": ") + var strFunctionList []string + for _, f := range p.AndFunctions { + strFunctionList = append(strFunctionList, f.String()) + } + builder.WriteString(strings.Join(strFunctionList, " && ")) + return builder.String() +} + +type RoutingRule struct { + AndFunctions []*Function + Outbound string +} + +func (r *RoutingRule) String(calcN bool) string { + var builder strings.Builder + var n int + for _, f := range r.AndFunctions { + if builder.Len() != 0 { + builder.WriteString(" && ") + } + var paramBuilder strings.Builder + n += len(f.Params) + for _, p := range f.Params { + if paramBuilder.Len() != 0 { + paramBuilder.WriteString(", ") + } + if p.Key != "" { + paramBuilder.WriteString(p.Key + ": " + p.Val) + } else { + paramBuilder.WriteString(p.Val) + } + } + builder.WriteString(fmt.Sprintf("%v(%v)", f.Name, 
paramBuilder.String())) + } + builder.WriteString(" -> " + r.Outbound) + if calcN { + builder.WriteString(" [n = " + strconv.Itoa(n) + "]") + } + return builder.String() +} diff --git a/pkg/config_parser/walker.go b/pkg/config_parser/walker.go new file mode 100644 index 0000000..1be8ce8 --- /dev/null +++ b/pkg/config_parser/walker.go @@ -0,0 +1,239 @@ +/* + * SPDX-License-Identifier: AGPL-3.0-only + * Copyright (c) since 2022, mzz2017 (mzz@tuta.io). All rights reserved. + */ + +package config_parser + +import ( + "fmt" + "github.com/antlr/antlr4/runtime/Go/antlr/v4" + "github.com/v2rayA/dae-config-dist/go/dae_config" + "log" + "strconv" +) + +type Walker struct { + *dae_config.Basedae_configListener + parser antlr.Parser + + Sections []*Section +} + +func NewWalker(parser antlr.Parser) *Walker { + return &Walker{ + parser: parser, + } +} + +type paramParser struct { + list []*Param +} + +func getValueFromLiteral(literal *dae_config.LiteralContext) string { + quote := literal.Quote_literal() + if quote == nil { + return literal.GetText() + } + text := quote.GetText() + return text[1 : len(text)-1] +} + +func (p *paramParser) parseParam(ctx *dae_config.ParameterContext) *Param { + children := ctx.GetChildren() + if len(children) == 3 { + return &Param{ + Key: children[0].(*antlr.TerminalNodeImpl).GetText(), + Val: getValueFromLiteral(children[2].(*dae_config.LiteralContext)), + } + } else if len(children) == 1 { + return &Param{ + Key: "", + Val: getValueFromLiteral(children[0].(*dae_config.LiteralContext)), + } + } + panic("unexpected") +} +func (p *paramParser) parseNonEmptyParamList(ctx *dae_config.NonEmptyParameterListContext) { + children := ctx.GetChildren() + if len(children) == 3 { + p.list = append(p.list, p.parseParam(children[2].(*dae_config.ParameterContext))) + p.parseNonEmptyParamList(children[0].(*dae_config.NonEmptyParameterListContext)) + } else if len(children) == 1 { + p.list = append(p.list, p.parseParam(children[0].(*dae_config.ParameterContext))) + } +} + +func (w *Walker) parseNonEmptyParamList(list *dae_config.NonEmptyParameterListContext) []*Param { + paramParser := new(paramParser) + paramParser.parseNonEmptyParamList(list) + return paramParser.list +} + +func (w *Walker) reportKeyUnsupportedError(ctx interface{}, keyName, funcName string) { + w.ReportError(ctx, ErrorType_Unsupported, fmt.Sprintf("key %v in %v()", strconv.Quote(keyName), funcName)) +} + +type functionVerifier func(function *Function, ctx interface{}) bool + +func (w *Walker) parseFunctionPrototype(ctx *dae_config.FunctionPrototypeContext, verifier functionVerifier) *Function { + children := ctx.GetChildren() + funcName := children[0].(*antlr.TerminalNodeImpl).GetText() + paramList := children[2].(*dae_config.OptParameterListContext) + children = paramList.GetChildren() + if len(children) == 0 { + w.ReportError(ctx, ErrorType_Unsupported, "empty parameter list") + return nil + } + nonEmptyParamList := children[0].(*dae_config.NonEmptyParameterListContext) + params := w.parseNonEmptyParamList(nonEmptyParamList) + f := &Function{ + Name: funcName, + Params: params, + } + // Verify function name and param keys. 
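+ // A nil verifier accepts everything; otherwise a false result drops the
+ // whole function. For illustration (values taken from the test config above),
+ // the fragment name(keyword:netflix) parses into
+ //   &Function{Name: "name", Params: []*Param{{Key: "keyword", Val: "netflix"}}}.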
+ if verifier != nil && !verifier(f, ctx) { + return nil + } + return f +} + +func (w *Walker) ReportError(ctx interface{}, errorType ErrorType, target ...string) { + //debug.PrintStack() + bCtx := BaseContext(ctx) + tgt := strconv.Quote(bCtx.GetStart().GetText()) + if len(target) != 0 { + tgt = target[0] + } + if errorType == ErrorType_NotSet { + w.parser.NotifyErrorListeners(fmt.Sprintf("%v %v.", tgt, errorType), nil, nil) + return + } + w.parser.NotifyErrorListeners(fmt.Sprintf("%v %v.", tgt, errorType), bCtx.GetStart(), nil) +} + +func (w *Walker) parseDeclaration(ctx dae_config.IDeclarationContext) *Param { + children := ctx.GetChildren() + key := children[0].(*antlr.TerminalNodeImpl).GetText() + switch valueCtx := children[2].(type) { + case *dae_config.LiteralContext: + value := getValueFromLiteral(valueCtx) + return &Param{ + Key: key, + Val: value, + } + case *dae_config.FunctionPrototypeExpressionContext: + andFunctions := w.parseFunctionPrototypeExpression(valueCtx, nil) + return &Param{ + Key: key, + AndFunctions: andFunctions, + } + default: + w.ReportError(valueCtx, ErrorType_Unsupported) + return nil + } +} + +func (w *Walker) parseFunctionPrototypeExpression(ctx dae_config.IFunctionPrototypeExpressionContext, verifier functionVerifier) (andFunctions []*Function) { + children := ctx.GetChildren() + for _, child := range children { + // And rules. + if child, ok := child.(*dae_config.FunctionPrototypeContext); ok { + function := w.parseFunctionPrototype(child, verifier) + andFunctions = append(andFunctions, function) + } + } + return andFunctions +} + +func (w *Walker) parseRoutingRule(ctx dae_config.IRoutingRuleContext) *RoutingRule { + children := ctx.GetChildren() + //logrus.Debugln(ctx.GetText(), children) + left, ok := children[0].(*dae_config.RoutingRuleLeftContext) + if !ok { + w.ReportError(ctx, ErrorType_Unsupported, "not *RoutingRuleLeftContext: "+ctx.GetText()) + return nil + } + outbound := children[2].(*dae_config.Bare_literalContext).GetText() + // Parse functions. + children = left.GetChildren() + functionList, ok := children[1].(*dae_config.FunctionPrototypeExpressionContext) + if !ok { + w.ReportError(ctx, ErrorType_Unsupported, "not *FunctionPrototypeExpressionContext: "+ctx.GetText()) + return nil + } + andFunctions := w.parseFunctionPrototypeExpression(functionList, nil) + return &RoutingRule{ + AndFunctions: andFunctions, + Outbound: outbound, + } +} + +type routingRuleOrDeclarationOrLiteralOrExpressionListParser struct { + Items []*Item + Walker *Walker +} + +func (p *routingRuleOrDeclarationOrLiteralOrExpressionListParser) Parse(ctx dae_config.IRoutingRuleOrDeclarationOrLiteralOrExpressionListContext) { + for _, elem := range ctx.GetChildren() { + switch elem := elem.(type) { + case dae_config.IRoutingRuleContext: + rule := p.Walker.parseRoutingRule(elem) + if rule == nil { + return + } + p.Items = append(p.Items, NewRoutingRuleItem(rule)) + case dae_config.IDeclarationContext: + param := p.Walker.parseDeclaration(elem) + if param == nil { + return + } + p.Items = append(p.Items, NewParamItem(param)) + case *dae_config.LiteralContext: + p.Items = append(p.Items, NewParamItem(&Param{ + Key: "", + Val: getValueFromLiteral(elem), + })) + case dae_config.IExpressionContext: + section := p.Walker.parseExpression(elem) + if section == nil { + return + } + p.Items = append(p.Items, NewSectionItem(section)) + case dae_config.IRoutingRuleOrDeclarationOrLiteralOrExpressionListContext: + p.Parse(elem) + default: + log.Printf("? 
%v", elem.(*dae_config.ExpressionContext)) + p.Walker.ReportError(elem, ErrorType_Unsupported) + return + } + } +} +func (w *Walker) parseRoutingRuleOrDeclarationOrLiteralOrExpressionListContext(ctx dae_config.IRoutingRuleOrDeclarationOrLiteralOrExpressionListContext) []*Item { + parser := routingRuleOrDeclarationOrLiteralOrExpressionListParser{ + Items: nil, + Walker: w, + } + parser.Parse(ctx) + return parser.Items + +} + +func (w *Walker) parseExpression(exp dae_config.IExpressionContext) *Section { + children := exp.GetChildren() + name := children[0].(*antlr.TerminalNodeImpl).GetText() + list := children[2].(dae_config.IRoutingRuleOrDeclarationOrLiteralOrExpressionListContext) + items := w.parseRoutingRuleOrDeclarationOrLiteralOrExpressionListContext(list) + return &Section{ + Name: name, + Items: items, + } +} + +func (w *Walker) EnterProgramStructureBlcok(ctx *dae_config.ProgramStructureBlcokContext) { + section := w.parseExpression(ctx.Expression()) + if section == nil { + return + } + w.Sections = append(w.Sections, section) +} diff --git a/pkg/geodata/common.pb.go b/pkg/geodata/common.pb.go index fd2a3e2..0c24e05 100644 --- a/pkg/geodata/common.pb.go +++ b/pkg/geodata/common.pb.go @@ -3,11 +3,11 @@ package geodata import ( - _ "github.com/v2fly/v2ray-core/v5/common/protoext" - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - reflect "reflect" - sync "sync" + _ "github.com/v2rayA/dae/pkg/geodata/protoext" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/runtime/protoimpl" + "reflect" + "sync" ) const ( diff --git a/pkg/geodata/protoext/extensions.pb.go b/pkg/geodata/protoext/extensions.pb.go new file mode 100644 index 0000000..54f273e --- /dev/null +++ b/pkg/geodata/protoext/extensions.pb.go @@ -0,0 +1,355 @@ +// Copied from https://github.com/v2fly/v2ray-core/tree/42b166760b2ba8d984e514b830fcd44e23728e43 + +package protoext + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + descriptorpb "google.golang.org/protobuf/types/descriptorpb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type MessageOpt struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Type []string `protobuf:"bytes,1,rep,name=type,proto3" json:"type,omitempty"` + ShortName []string `protobuf:"bytes,2,rep,name=short_name,json=shortName,proto3" json:"short_name,omitempty"` + TransportOriginalName string `protobuf:"bytes,86001,opt,name=transport_original_name,json=transportOriginalName,proto3" json:"transport_original_name,omitempty"` +} + +func (x *MessageOpt) Reset() { + *x = MessageOpt{} + if protoimpl.UnsafeEnabled { + mi := &file_common_protoext_extensions_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *MessageOpt) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MessageOpt) ProtoMessage() {} + +func (x *MessageOpt) ProtoReflect() protoreflect.Message { + mi := &file_common_protoext_extensions_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MessageOpt.ProtoReflect.Descriptor instead. +func (*MessageOpt) Descriptor() ([]byte, []int) { + return file_common_protoext_extensions_proto_rawDescGZIP(), []int{0} +} + +func (x *MessageOpt) GetType() []string { + if x != nil { + return x.Type + } + return nil +} + +func (x *MessageOpt) GetShortName() []string { + if x != nil { + return x.ShortName + } + return nil +} + +func (x *MessageOpt) GetTransportOriginalName() string { + if x != nil { + return x.TransportOriginalName + } + return "" +} + +type FieldOpt struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + AnyWants []string `protobuf:"bytes,1,rep,name=any_wants,json=anyWants,proto3" json:"any_wants,omitempty"` + AllowedValues []string `protobuf:"bytes,2,rep,name=allowed_values,json=allowedValues,proto3" json:"allowed_values,omitempty"` + AllowedValueTypes []string `protobuf:"bytes,3,rep,name=allowed_value_types,json=allowedValueTypes,proto3" json:"allowed_value_types,omitempty"` + // convert_time_read_file_into read a file into another field, and clear this field during input parsing + ConvertTimeReadFileInto string `protobuf:"bytes,4,opt,name=convert_time_read_file_into,json=convertTimeReadFileInto,proto3" json:"convert_time_read_file_into,omitempty"` + // forbidden marks a boolean to be inaccessible to user + Forbidden bool `protobuf:"varint,5,opt,name=forbidden,proto3" json:"forbidden,omitempty"` + // convert_time_resource_loading read a file, and place its resource hash into another field + ConvertTimeResourceLoading string `protobuf:"bytes,6,opt,name=convert_time_resource_loading,json=convertTimeResourceLoading,proto3" json:"convert_time_resource_loading,omitempty"` + // convert_time_parse_ip parse a string ip address, and put its binary representation into another field + ConvertTimeParseIp string `protobuf:"bytes,7,opt,name=convert_time_parse_ip,json=convertTimeParseIp,proto3" json:"convert_time_parse_ip,omitempty"` +} + +func (x *FieldOpt) Reset() { + *x = FieldOpt{} + if protoimpl.UnsafeEnabled { + mi := &file_common_protoext_extensions_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FieldOpt) String() string { + return protoimpl.X.MessageStringOf(x) 
+} + +func (*FieldOpt) ProtoMessage() {} + +func (x *FieldOpt) ProtoReflect() protoreflect.Message { + mi := &file_common_protoext_extensions_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FieldOpt.ProtoReflect.Descriptor instead. +func (*FieldOpt) Descriptor() ([]byte, []int) { + return file_common_protoext_extensions_proto_rawDescGZIP(), []int{1} +} + +func (x *FieldOpt) GetAnyWants() []string { + if x != nil { + return x.AnyWants + } + return nil +} + +func (x *FieldOpt) GetAllowedValues() []string { + if x != nil { + return x.AllowedValues + } + return nil +} + +func (x *FieldOpt) GetAllowedValueTypes() []string { + if x != nil { + return x.AllowedValueTypes + } + return nil +} + +func (x *FieldOpt) GetConvertTimeReadFileInto() string { + if x != nil { + return x.ConvertTimeReadFileInto + } + return "" +} + +func (x *FieldOpt) GetForbidden() bool { + if x != nil { + return x.Forbidden + } + return false +} + +func (x *FieldOpt) GetConvertTimeResourceLoading() string { + if x != nil { + return x.ConvertTimeResourceLoading + } + return "" +} + +func (x *FieldOpt) GetConvertTimeParseIp() string { + if x != nil { + return x.ConvertTimeParseIp + } + return "" +} + +var file_common_protoext_extensions_proto_extTypes = []protoimpl.ExtensionInfo{ + { + ExtendedType: (*descriptorpb.MessageOptions)(nil), + ExtensionType: (*MessageOpt)(nil), + Field: 50000, + Name: "v2ray.core.common.protoext.message_opt", + Tag: "bytes,50000,opt,name=message_opt", + Filename: "common/protoext/extensions.proto", + }, + { + ExtendedType: (*descriptorpb.FieldOptions)(nil), + ExtensionType: (*FieldOpt)(nil), + Field: 50000, + Name: "v2ray.core.common.protoext.field_opt", + Tag: "bytes,50000,opt,name=field_opt", + Filename: "common/protoext/extensions.proto", + }, +} + +// Extension fields to descriptorpb.MessageOptions. +var ( + // optional v2ray.core.common.protoext.MessageOpt message_opt = 50000; + E_MessageOpt = &file_common_protoext_extensions_proto_extTypes[0] +) + +// Extension fields to descriptorpb.FieldOptions. 
+var ( + // optional v2ray.core.common.protoext.FieldOpt field_opt = 50000; + E_FieldOpt = &file_common_protoext_extensions_proto_extTypes[1] +) + +var File_common_protoext_extensions_proto protoreflect.FileDescriptor + +var file_common_protoext_extensions_proto_rawDesc = []byte{ + 0x0a, 0x20, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x65, 0x78, + 0x74, 0x2f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x12, 0x1a, 0x76, 0x32, 0x72, 0x61, 0x79, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x63, + 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x65, 0x78, 0x74, 0x1a, 0x20, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, + 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x22, 0x79, 0x0a, 0x0a, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x12, 0x12, + 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, + 0x70, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x68, 0x6f, 0x72, 0x74, 0x5f, 0x6e, 0x61, 0x6d, 0x65, + 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x09, 0x73, 0x68, 0x6f, 0x72, 0x74, 0x4e, 0x61, 0x6d, + 0x65, 0x12, 0x38, 0x0a, 0x17, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x6f, + 0x72, 0x69, 0x67, 0x69, 0x6e, 0x61, 0x6c, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0xf1, 0x9f, 0x05, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x15, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x4f, + 0x72, 0x69, 0x67, 0x69, 0x6e, 0x61, 0x6c, 0x4e, 0x61, 0x6d, 0x65, 0x22, 0xd0, 0x02, 0x0a, 0x08, + 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x61, 0x6e, 0x79, 0x5f, + 0x77, 0x61, 0x6e, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, 0x61, 0x6e, 0x79, + 0x57, 0x61, 0x6e, 0x74, 0x73, 0x12, 0x25, 0x0a, 0x0e, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x65, 0x64, + 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0d, 0x61, + 0x6c, 0x6c, 0x6f, 0x77, 0x65, 0x64, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x12, 0x2e, 0x0a, 0x13, + 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x65, 0x64, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x5f, 0x74, 0x79, + 0x70, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x11, 0x61, 0x6c, 0x6c, 0x6f, 0x77, + 0x65, 0x64, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x54, 0x79, 0x70, 0x65, 0x73, 0x12, 0x3c, 0x0a, 0x1b, + 0x63, 0x6f, 0x6e, 0x76, 0x65, 0x72, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x72, 0x65, 0x61, + 0x64, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x69, 0x6e, 0x74, 0x6f, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x17, 0x63, 0x6f, 0x6e, 0x76, 0x65, 0x72, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x52, 0x65, + 0x61, 0x64, 0x46, 0x69, 0x6c, 0x65, 0x49, 0x6e, 0x74, 0x6f, 0x12, 0x1c, 0x0a, 0x09, 0x66, 0x6f, + 0x72, 0x62, 0x69, 0x64, 0x64, 0x65, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x66, + 0x6f, 0x72, 0x62, 0x69, 0x64, 0x64, 0x65, 0x6e, 0x12, 0x41, 0x0a, 0x1d, 0x63, 0x6f, 0x6e, 0x76, + 0x65, 0x72, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x5f, 0x6c, 0x6f, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x1a, 0x63, 0x6f, 0x6e, 0x76, 0x65, 0x72, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x52, 0x65, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x4c, 0x6f, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x31, 0x0a, 0x15, 0x63, + 0x6f, 0x6e, 0x76, 0x65, 0x72, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x70, 0x61, 0x72, 0x73, + 0x65, 0x5f, 0x69, 0x70, 0x18, 0x07, 
0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x63, 0x6f, 0x6e, 0x76, + 0x65, 0x72, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x50, 0x61, 0x72, 0x73, 0x65, 0x49, 0x70, 0x3a, 0x6a, + 0x0a, 0x0b, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x6f, 0x70, 0x74, 0x12, 0x1f, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0xd0, + 0x86, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x76, 0x32, 0x72, 0x61, 0x79, 0x2e, 0x63, + 0x6f, 0x72, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x65, 0x78, 0x74, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x52, 0x0a, + 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x3a, 0x62, 0x0a, 0x09, 0x66, 0x69, + 0x65, 0x6c, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x12, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0xd0, 0x86, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, + 0x2e, 0x76, 0x32, 0x72, 0x61, 0x79, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, + 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x65, 0x78, 0x74, 0x2e, 0x46, 0x69, 0x65, 0x6c, + 0x64, 0x4f, 0x70, 0x74, 0x52, 0x08, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x42, 0x6f, + 0x0a, 0x1e, 0x63, 0x6f, 0x6d, 0x2e, 0x76, 0x32, 0x72, 0x61, 0x79, 0x2e, 0x63, 0x6f, 0x72, 0x65, + 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x65, 0x78, 0x74, + 0x50, 0x01, 0x5a, 0x2e, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x76, + 0x32, 0x66, 0x6c, 0x79, 0x2f, 0x76, 0x32, 0x72, 0x61, 0x79, 0x2d, 0x63, 0x6f, 0x72, 0x65, 0x2f, + 0x76, 0x35, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x65, + 0x78, 0x74, 0xaa, 0x02, 0x1a, 0x56, 0x32, 0x52, 0x61, 0x79, 0x2e, 0x43, 0x6f, 0x72, 0x65, 0x2e, + 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x45, 0x78, 0x74, 0x62, + 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_common_protoext_extensions_proto_rawDescOnce sync.Once + file_common_protoext_extensions_proto_rawDescData = file_common_protoext_extensions_proto_rawDesc +) + +func file_common_protoext_extensions_proto_rawDescGZIP() []byte { + file_common_protoext_extensions_proto_rawDescOnce.Do(func() { + file_common_protoext_extensions_proto_rawDescData = protoimpl.X.CompressGZIP(file_common_protoext_extensions_proto_rawDescData) + }) + return file_common_protoext_extensions_proto_rawDescData +} + +var file_common_protoext_extensions_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_common_protoext_extensions_proto_goTypes = []interface{}{ + (*MessageOpt)(nil), // 0: v2ray.core.common.protoext.MessageOpt + (*FieldOpt)(nil), // 1: v2ray.core.common.protoext.FieldOpt + (*descriptorpb.MessageOptions)(nil), // 2: google.protobuf.MessageOptions + (*descriptorpb.FieldOptions)(nil), // 3: google.protobuf.FieldOptions +} +var file_common_protoext_extensions_proto_depIdxs = []int32{ + 2, // 0: v2ray.core.common.protoext.message_opt:extendee -> google.protobuf.MessageOptions + 3, // 1: v2ray.core.common.protoext.field_opt:extendee -> google.protobuf.FieldOptions + 0, // 2: v2ray.core.common.protoext.message_opt:type_name -> v2ray.core.common.protoext.MessageOpt + 1, // 3: v2ray.core.common.protoext.field_opt:type_name -> v2ray.core.common.protoext.FieldOpt + 4, // 
[4:4] is the sub-list for method output_type + 4, // [4:4] is the sub-list for method input_type + 2, // [2:4] is the sub-list for extension type_name + 0, // [0:2] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_common_protoext_extensions_proto_init() } +func file_common_protoext_extensions_proto_init() { + if File_common_protoext_extensions_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_common_protoext_extensions_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MessageOpt); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_common_protoext_extensions_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*FieldOpt); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_common_protoext_extensions_proto_rawDesc, + NumEnums: 0, + NumMessages: 2, + NumExtensions: 2, + NumServices: 0, + }, + GoTypes: file_common_protoext_extensions_proto_goTypes, + DependencyIndexes: file_common_protoext_extensions_proto_depIdxs, + MessageInfos: file_common_protoext_extensions_proto_msgTypes, + ExtensionInfos: file_common_protoext_extensions_proto_extTypes, + }.Build() + File_common_protoext_extensions_proto = out.File + file_common_protoext_extensions_proto_rawDesc = nil + file_common_protoext_extensions_proto_goTypes = nil + file_common_protoext_extensions_proto_depIdxs = nil +} diff --git a/pkg/logger/logger.go b/pkg/logger/logger.go index 3a83857..cd82275 100644 --- a/pkg/logger/logger.go +++ b/pkg/logger/logger.go @@ -19,6 +19,7 @@ func NewLogger(verbose int) *logrus.Logger { default: level = logrus.TraceLevel } + log.SetLevel(level) return log
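Below is a minimal usage sketch, not part of the diff above. It assumes only what the logger.go hunk shows: NewLogger(verbose int) lives in github.com/v2rayA/dae/pkg/logger, a verbose count outside the explicit cases falls through to logrus.TraceLevel, and the added log.SetLevel(level) call now applies that level. Without that call, logrus.New() leaves the logger at its default InfoLevel, so trace output is silently dropped.

package main

import "github.com/v2rayA/dae/pkg/logger"

func main() {
	// Hypothetical caller: a verbose count of 2 hits the default branch of the
	// switch above, so the computed level is TraceLevel.
	log := logger.NewLogger(2)

	log.Traceln("routing rule matched") // visible only because SetLevel(level) is now applied
	log.Infoln("control plane started") // visible either way
}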