*: Replace libbpfgo with cilium/ebpf (#2771)

Derek Parker 2021-11-03 08:58:04 -07:00 committed by GitHub
parent c207db792a
commit cd9e6c02a6
115 changed files with 15528 additions and 2642 deletions

1
.gitignore vendored

@ -10,4 +10,3 @@ localtests
*.iml
.teamcity/target
.vscode
**/*.o

@ -1,7 +1,5 @@
.DEFAULT_GOAL=test
BPF_OBJ := pkg/proc/internal/ebpf/trace_probe/trace.o
BPF_SRC := $(shell find . -type f -name '*.bpf.*')
GO_SRC := $(shell find . -type f -not -path './_fixtures/*' -not -path './vendor/*' -not -path './_scripts/*' -not -path './localtests/*' -name '*.go')
check-cert:
@ -10,28 +8,6 @@ check-cert:
build: $(GO_SRC)
@go run _scripts/make.go build
docker-image-build:
@docker build -t ebpf-builder:latest -f ./pkg/proc/internal/ebpf/trace_probe/Dockerfile ./pkg/proc/internal/ebpf/
docker-ebpf-obj-build: docker-image-build
@docker run -it --rm \
-v $(abspath .):/delve \
ebpf-builder:latest
$(BPF_OBJ): $(BPF_SRC)
clang \
-I /usr/include \
-I /usr/src/kernels/$(uname -r)/tools/lib \
-I /usr/src/kernels/$(uname -r)/tools/bpf/resolve_btfids/libbpf \
-g -O2 \
-c \
-target bpf \
-o $(BPF_OBJ) \
pkg/proc/internal/ebpf/trace_probe/trace.bpf.c
build-bpf: $(BPF_OBJ) $(GO_SRC)
@env CGO_LDFLAGS="/usr/lib/libbpf.a" go run _scripts/make.go build --tags=ebpf
install: $(GO_SRC)
@go run _scripts/make.go install
@ -53,4 +29,4 @@ test-integration-run:
vendor:
@go run _scripts/make.go vendor
.PHONY: vendor test-integration-run test-proc-run test check-cert install build vet build-bpf uninstall docker-image-build docker-ebpf-obj-build
.PHONY: vendor test-integration-run test-proc-run test check-cert install build vet uninstall

3
go.mod

@ -3,7 +3,7 @@ module github.com/go-delve/delve
go 1.16
require (
github.com/aquasecurity/libbpfgo v0.1.2-0.20210708203834-4928d36fafac
github.com/cilium/ebpf v0.7.0
github.com/cosiner/argv v0.1.0
github.com/creack/pty v1.1.9
github.com/derekparker/trie v0.0.0-20200317170641-1fdf38b7b0e9
@ -14,6 +14,7 @@ require (
github.com/peterh/liner v1.2.1
github.com/sirupsen/logrus v1.6.0
github.com/spf13/cobra v1.1.3
github.com/stretchr/testify v1.7.0 // indirect
go.starlark.net v0.0.0-20200821142938-949cc6f4b097
golang.org/x/arch v0.0.0-20190927153633-4e8777c89be4
golang.org/x/sys v0.0.0-20211019181941-9d821ace8654

27
go.sum

@ -16,8 +16,6 @@ github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/aquasecurity/libbpfgo v0.1.2-0.20210708203834-4928d36fafac h1:oehMMAySC3p8eSwcwQ8lTXxeCkkPll+AwNesUNowbJ8=
github.com/aquasecurity/libbpfgo v0.1.2-0.20210708203834-4928d36fafac/go.mod h1:/+clceXE103FaXvVTIY2HAkQjxNtkra4DRWvZYr2SKw=
github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=
github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY=
github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
@ -29,6 +27,8 @@ github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghf
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
github.com/cilium/ebpf v0.7.0 h1:1k/q3ATgxSXRdrmPfH8d7YK0GfqVsEKZAX9dQZvs56k=
github.com/cilium/ebpf v0.7.0/go.mod h1:/oI2+1shJiTGAMgl6/RgJr36Eo1jzrRcAWbcXO2usCA=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
@ -49,6 +49,8 @@ github.com/derekparker/trie v0.0.0-20200317170641-1fdf38b7b0e9/go.mod h1:D6ICZm0
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
github.com/frankban/quicktest v1.11.3 h1:8sXhOn0uLys67V8EsXLc6eszDs8VXWxL3iRvebPhedY=
github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
@ -70,6 +72,8 @@ github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Z
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.5.4 h1:L8R9j+yAqZuZjsqh/z+F1NCffTKKLShY6zXTItVIZ8M=
github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-dap v0.6.0 h1:Y1RHGUtv3R8y6sXq2dtGRMYrFB2hSqyFVws7jucrzX4=
github.com/google/go-dap v0.6.0/go.mod h1:5q8aYQFnHOAZEMP+6vmq25HKYAEwE+LF5yh7JKrrhSQ=
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
@ -118,8 +122,9 @@ github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxv
github.com/konsorten/go-windows-terminal-sequences v1.0.3 h1:CE8S1cTafDpPvMhIxNJKvHsGVBgn1xWYf1NbHQhywc8=
github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI=
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
@ -195,9 +200,6 @@ github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/
github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw=
github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
github.com/yuin/goldmark v1.4.0 h1:OtISOGfH6sOWa1/qXqqAiOIAO6Z5J3AEAE18WAq6BiQ=
github.com/yuin/goldmark v1.4.0/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
github.com/yuin/goldmark v1.4.1 h1:/vn0k+RBvwlxEmP5E7SZMqNxPhfMVFEJiykr15/0XKM=
github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
@ -233,8 +235,6 @@ golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU
golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY=
golang.org/x/mod v0.4.2 h1:Gz96sIWK3OalVv/I/qNygP42zyoKp3xptRVCWRFEBvo=
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.5.1 h1:OJxoQ/rynoF0dcCdI7cLPktw/hR2cueqYfjm43oqK38=
golang.org/x/mod v0.5.1/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@ -251,7 +251,6 @@ golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn
golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@ -279,10 +278,7 @@ golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210514084401-e8d321eab015 h1:hZR0X1kPW+nwyJ9xRxqZk1vx5RUObAPBdKVvXPDUH/E=
golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e h1:WUoyKPm6nCo1BnNUvPGnFG3T5DUVem42yDJZZ4CNxMA=
golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210906170528-6f6e22806c34/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211019181941-9d821ace8654 h1:id054HUawV2/6IGm2IV8KZQjqtwAOo2CYlOToYqa0d0=
golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
@ -311,14 +307,11 @@ golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtn
golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191127201027-ecd32218bd7f h1:3MlESg/jvTr87F4ttA/q4B+uhe/q6qleC9/DP+IwQmY=
golang.org/x/tools v0.0.0-20191127201027-ecd32218bd7f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.1.7 h1:6j8CgantCy3yc8JGBqkDLMKWqZ0RDU2g1HVgacojGWQ=
golang.org/x/tools v0.1.7/go.mod h1:LGqMHiF4EqQNHR1JncWGqT5BVaXmza+X+BDGol+dOxo=
golang.org/x/tools v0.1.8-0.20211028023602-8de2a7fd1736 h1:cw6nUxdoEN5iEIWYD8aAsTZ8iYjLVNiHAb7xz/80WO4=
golang.org/x/tools v0.1.8-0.20211028023602-8de2a7fd1736/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=

@ -1,4 +1,4 @@
#include "trace.bpf.h"
#include "include/trace.bpf.h"
#include <string.h>
#define STRING_KIND 24
@ -239,7 +239,7 @@ int uprobe__dlv_trace(struct pt_regs *ctx) {
parse_params(ctx, args->n_ret_parameters, parsed_args->ret_params);
}
bpf_ringbuf_submit(parsed_args, 0);
bpf_ringbuf_submit(parsed_args, BPF_RB_FORCE_WAKEUP);
return 0;
}

@ -1,13 +1,12 @@
//go:build ebpf
// +build ebpf
//go:build linux && amd64 && cgo && go1.16
// +build linux,amd64,cgo,go1.16
package ebpf
// #include "./trace_probe/function_vals.bpf.h"
// #include "./bpf/include/function_vals.bpf.h"
import "C"
import (
"debug/elf"
_ "embed"
"encoding/binary"
"errors"
"reflect"
@ -18,37 +17,37 @@ import (
"github.com/go-delve/delve/pkg/dwarf/godwarf"
"github.com/go-delve/delve/pkg/dwarf/op"
bpf "github.com/aquasecurity/libbpfgo"
"github.com/aquasecurity/libbpfgo/helpers"
"github.com/cilium/ebpf"
"github.com/cilium/ebpf/link"
"github.com/cilium/ebpf/ringbuf"
)
//go:embed trace_probe/trace.o
var TraceProbeBytes []byte
//go:generate go run github.com/cilium/ebpf/cmd/bpf2go -tags "go1.16" -target amd64 trace bpf/trace.bpf.c -- -I./bpf/include
const FakeAddressBase = 0xbeed000000000000
type EBPFContext struct {
bpfModule *bpf.Module
bpfProg *bpf.BPFProg
objs *traceObjects
bpfEvents chan []byte
bpfRingBuf *bpf.RingBuffer
bpfArgMap *bpf.BPFMap
bpfRingBuf *ringbuf.Reader
executable *link.Executable
bpfArgMap *ebpf.Map
parsedBpfEvents []RawUProbeParams
m sync.Mutex
}
func (ctx *EBPFContext) Close() {
if ctx.bpfModule != nil {
ctx.bpfModule.Close()
if ctx.objs != nil {
ctx.objs.Close()
}
}
func (ctx *EBPFContext) AttachUprobe(pid int, name string, offset uint32) error {
if ctx.bpfProg == nil {
func (ctx *EBPFContext) AttachUprobe(pid int, name string, offset uint64) error {
if ctx.executable == nil {
return errors.New("no eBPF program loaded")
}
_, err := ctx.bpfProg.AttachUprobe(pid, name, offset)
_, err := ctx.executable.Uprobe(name, ctx.objs.tracePrograms.UprobeDlvTrace, &link.UprobeOptions{PID: pid, Offset: offset})
return err
}
@ -58,7 +57,7 @@ func (ctx *EBPFContext) UpdateArgMap(key uint64, goidOffset int64, args []UProbe
}
params := createFunctionParameterList(key, goidOffset, args, isret)
params.g_addr_offset = C.longlong(gAddrOffset)
return ctx.bpfArgMap.Update(unsafe.Pointer(&key), unsafe.Pointer(&params))
return ctx.bpfArgMap.Update(unsafe.Pointer(&key), unsafe.Pointer(&params), ebpf.UpdateAny)
}
func (ctx *EBPFContext) GetBufferedTracepoints() []RawUProbeParams {
@ -75,47 +74,39 @@ func (ctx *EBPFContext) GetBufferedTracepoints() []RawUProbeParams {
return events
}
func SymbolToOffset(file, symbol string) (uint32, error) {
return helpers.SymbolToOffset(file, symbol)
}
func LoadEBPFTracingProgram(path string) (*EBPFContext, error) {
var (
ctx EBPFContext
err error
objs traceObjects
)
func LoadEBPFTracingProgram() (*EBPFContext, error) {
var ctx EBPFContext
var err error
ctx.bpfModule, err = bpf.NewModuleFromBuffer(TraceProbeBytes, "trace_probe/trace.o")
ctx.executable, err = link.OpenExecutable(path)
if err != nil {
return nil, err
}
ctx.bpfModule.BPFLoadObject()
prog, err := ctx.bpfModule.GetProgram("uprobe__dlv_trace")
if err != nil {
if err := loadTraceObjects(&objs, nil); err != nil {
return nil, err
}
ctx.bpfProg = prog
ctx.objs = &objs
ctx.bpfEvents = make(chan []byte)
ctx.bpfRingBuf, err = ctx.bpfModule.InitRingBuf("events", ctx.bpfEvents)
ctx.bpfRingBuf, err = ringbuf.NewReader(objs.Events)
if err != nil {
return nil, err
}
ctx.bpfRingBuf.Start()
ctx.bpfArgMap, err = ctx.bpfModule.GetMap("arg_map")
if err != nil {
return nil, err
}
ctx.bpfArgMap = objs.ArgMap
// TODO(derekparker): This should eventually be moved to a more generalized place.
go func() {
for {
b, ok := <-ctx.bpfEvents
if !ok {
e, err := ctx.bpfRingBuf.Read()
if err != nil {
return
}
parsed := parseFunctionParameterList(b)
parsed := parseFunctionParameterList(e.RawSample)
ctx.m.Lock()
ctx.parsedBpfEvents = append(ctx.parsedBpfEvents, parsed)
@ -208,7 +199,7 @@ func createFunctionParameterList(entry uint64, goidOffset int64, args []UProbeAr
return params
}
func AddressToOffset(f *elf.File, addr uint64) (uint32, error) {
func AddressToOffset(f *elf.File, addr uint64) (uint64, error) {
sectionsToSearchForSymbol := []*elf.Section{}
for i := range f.Sections {
@ -232,5 +223,5 @@ func AddressToOffset(f *elf.File, addr uint64) (uint32, error) {
return 0, errors.New("could not find symbol in executable sections of binary")
}
return uint32(addr - executableSection.Addr + executableSection.Offset), nil
return uint64(addr - executableSection.Addr + executableSection.Offset), nil
}
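
For context on the rewritten loader above, here is a minimal, hedged sketch of the cilium/ebpf flow it adopts: load the bpf2go-generated objects, open the target executable, attach the uprobe, and read one record from the ring buffer. The helper name `sketchAttach` and its arguments are placeholders, not part of the commit; error handling mirrors the diff but is abbreviated.

```go
package ebpf

import (
	"github.com/cilium/ebpf/link"
	"github.com/cilium/ebpf/ringbuf"
)

// sketchAttach is a hypothetical helper illustrating the same cycle that
// LoadEBPFTracingProgram and AttachUprobe perform above.
func sketchAttach(path, symbol string, pid int, offset uint64) error {
	// Load the embedded, bpf2go-generated programs and maps into the kernel.
	var objs traceObjects
	if err := loadTraceObjects(&objs, nil); err != nil {
		return err
	}
	defer objs.Close()

	// Open the traced binary and attach the uprobe at the resolved offset.
	ex, err := link.OpenExecutable(path)
	if err != nil {
		return err
	}
	up, err := ex.Uprobe(symbol, objs.UprobeDlvTrace, &link.UprobeOptions{PID: pid, Offset: offset})
	if err != nil {
		return err
	}
	defer up.Close()

	// Read a single event produced by the probe from the BPF ring buffer.
	rd, err := ringbuf.NewReader(objs.Events)
	if err != nil {
		return err
	}
	defer rd.Close()
	rec, err := rd.Read()
	if err != nil {
		return err
	}
	_ = parseFunctionParameterList(rec.RawSample) // as the reader goroutine above does
	return nil
}
```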

@ -1,5 +1,5 @@
//go:build !ebpf
// +build !ebpf
//go:build !linux || !amd64 || !go1.16 || !cgo
// +build !linux !amd64 !go1.16 !cgo
package ebpf
@ -35,7 +35,7 @@ func SymbolToOffset(file, symbol string) (uint32, error) {
return 0, errors.New("eBPF disabled")
}
func LoadEBPFTracingProgram() (*EBPFContext, error) {
func LoadEBPFTracingProgram(path string) (*EBPFContext, error) {
return nil, errors.New("eBPF disabled")
}

@ -0,0 +1,126 @@
// Code generated by bpf2go; DO NOT EDIT.
//go:build (386 || amd64) && go1.16
// +build 386 amd64
// +build go1.16
package ebpf
import (
"bytes"
_ "embed"
"fmt"
"io"
"github.com/cilium/ebpf"
)
// loadTrace returns the embedded CollectionSpec for trace.
func loadTrace() (*ebpf.CollectionSpec, error) {
reader := bytes.NewReader(_TraceBytes)
spec, err := ebpf.LoadCollectionSpecFromReader(reader)
if err != nil {
return nil, fmt.Errorf("can't load trace: %w", err)
}
return spec, err
}
// loadTraceObjects loads trace and converts it into a struct.
//
// The following types are suitable as obj argument:
//
// *traceObjects
// *tracePrograms
// *traceMaps
//
// See ebpf.CollectionSpec.LoadAndAssign documentation for details.
func loadTraceObjects(obj interface{}, opts *ebpf.CollectionOptions) error {
spec, err := loadTrace()
if err != nil {
return err
}
return spec.LoadAndAssign(obj, opts)
}
// traceSpecs contains maps and programs before they are loaded into the kernel.
//
// It can be passed ebpf.CollectionSpec.Assign.
type traceSpecs struct {
traceProgramSpecs
traceMapSpecs
}
// traceProgramSpecs contains programs before they are loaded into the kernel.
//
// It can be passed ebpf.CollectionSpec.Assign.
type traceProgramSpecs struct {
UprobeDlvTrace *ebpf.ProgramSpec `ebpf:"uprobe__dlv_trace"`
}
// traceMapSpecs contains maps before they are loaded into the kernel.
//
// It can be passed ebpf.CollectionSpec.Assign.
type traceMapSpecs struct {
ArgMap *ebpf.MapSpec `ebpf:"arg_map"`
Events *ebpf.MapSpec `ebpf:"events"`
Heap *ebpf.MapSpec `ebpf:"heap"`
}
// traceObjects contains all objects after they have been loaded into the kernel.
//
// It can be passed to loadTraceObjects or ebpf.CollectionSpec.LoadAndAssign.
type traceObjects struct {
tracePrograms
traceMaps
}
func (o *traceObjects) Close() error {
return _TraceClose(
&o.tracePrograms,
&o.traceMaps,
)
}
// traceMaps contains all maps after they have been loaded into the kernel.
//
// It can be passed to loadTraceObjects or ebpf.CollectionSpec.LoadAndAssign.
type traceMaps struct {
ArgMap *ebpf.Map `ebpf:"arg_map"`
Events *ebpf.Map `ebpf:"events"`
Heap *ebpf.Map `ebpf:"heap"`
}
func (m *traceMaps) Close() error {
return _TraceClose(
m.ArgMap,
m.Events,
m.Heap,
)
}
// tracePrograms contains all programs after they have been loaded into the kernel.
//
// It can be passed to loadTraceObjects or ebpf.CollectionSpec.LoadAndAssign.
type tracePrograms struct {
UprobeDlvTrace *ebpf.Program `ebpf:"uprobe__dlv_trace"`
}
func (p *tracePrograms) Close() error {
return _TraceClose(
p.UprobeDlvTrace,
)
}
func _TraceClose(closers ...io.Closer) error {
for _, closer := range closers {
if err := closer.Close(); err != nil {
return err
}
}
return nil
}
// Do not access this directly.
//go:embed trace_bpfel_x86.o
var _TraceBytes []byte

Binary file not shown.

@ -1,6 +0,0 @@
FROM golang:1.16-alpine
RUN apk --no-cache update && apk --no-cache add clang llvm make gcc libc6-compat coreutils linux-headers musl-dev elfutils-dev libelf-static zlib-static make libbpf-dev libbpf git
WORKDIR /delve
CMD [ "/usr/bin/make", "build-bpf" ]

@ -1,5 +1,5 @@
//go:build ebpf
// +build ebpf
//go:build linux && amd64 && cgo && go1.16
// +build linux,amd64,cgo,go1.16
package native

@ -710,7 +710,7 @@ func (dbp *nativeProcess) SetUProbe(fnName string, goidOffset int64, args []ebpf
// Lazily load and initialize the BPF program upon request to set a uprobe.
if dbp.os.ebpf == nil {
var err error
dbp.os.ebpf, err = ebpf.LoadEBPFTracingProgram()
dbp.os.ebpf, err = ebpf.LoadEBPFTracingProgram(dbp.bi.Images[0].Path)
if err != nil {
return err
}
@ -735,10 +735,6 @@ func (dbp *nativeProcess) SetUProbe(fnName string, goidOffset int64, args []ebpf
}
debugname := dbp.bi.Images[0].Path
offset, err := ebpf.SymbolToOffset(debugname, fnName)
if err != nil {
return err
}
// First attach a uprobe at all return addresses. We do this instead of using a uretprobe
// for two reasons:
@ -782,7 +778,12 @@ func (dbp *nativeProcess) SetUProbe(fnName string, goidOffset int64, args []ebpf
}
}
return dbp.os.ebpf.AttachUprobe(dbp.Pid(), debugname, offset)
off, err := ebpf.AddressToOffset(f, fn.Entry)
if err != nil {
return err
}
return dbp.os.ebpf.AttachUprobe(dbp.Pid(), debugname, off)
}
func killProcess(pid int) error {

@ -1,5 +1,5 @@
//go:build !ebpf
// +build !ebpf
//go:build !linux || !amd64 || !go1.16 || !cgo
// +build !linux !amd64 !go1.16 !cgo
package native

@ -1 +0,0 @@
selftest/dist

@ -1,4 +0,0 @@
[submodule "selftest/libbpf-module"]
path = selftest/libbpf-module
url = https://github.com/libbpf/libbpf

@ -1,202 +0,0 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

@ -1,29 +0,0 @@
TARGET_BPF := test/test.bpf.o
VMLINUX_H = test/vmlinux.h
GO_SRC := $(shell find . -type f -name '*.go')
BPF_SRC := $(shell find . -type f -name '*.bpf.c')
PWD := $(shell pwd)
LIBBPF_HEADERS := /usr/include/bpf
LIBBPF := "-lbpf"
.PHONY: all
all: test
$(VMLINUX_H):
bpftool btf dump file /sys/kernel/btf/vmlinux format c > test/vmlinux.h
go_env := CC=gcc CGO_CFLAGS="-I $(LIBBPF_HEADERS)" CGO_LDFLAGS="$(LIBBPF)"
.PHONY: test
test: $(TARGET_BPF) $(GO_SRC)
$(go_env) go test -ldflags '-extldflags "-static"' .
$(TARGET_BPF): $(BPF_SRC) $(VMLINUX_H)
clang \
-g -O2 -c -target bpf \
-o $@ $<
.PHONY: clean
clean:
rm $(TARGET_BPF) $(VMLINUX_H)

@ -1,61 +0,0 @@
# libbpfgo
<img src="docs/images/aqua-tux.png" width="150" height="auto">
___
libbpfgo is a Go library for working with Linux's [eBPF](https://ebpf.io/). It was created for [Tracee](https://github.com/aquasecurity/tracee), our open source runtime security and eBPF tracing tool written in Go. If you are interested in eBPF and its applications, check out Tracee on GitHub: [https://github.com/aquasecurity/tracee](https://github.com/aquasecurity/tracee).
libbpfgo is built around libbpf - the standard library for interacting with eBPF from userspace, which is a C library maintained in Linux upstream. We have created libbpfgo as a thin Go wrapper around libbpf.
## Installing
libbpfgo uses cgo to interoperate with libbpf and expects to be linked against libbpf at link or run time. Simply importing libbpfgo is not enough to get started; you will need to satisfy this dependency in one of the following ways:
1. Install libbpf as a shared object on the system. libbpf may already be packaged for your distribution; if not, you can build and install it from source. More info [here](https://github.com/libbpf/libbpf).
1. Embed libbpf into your Go project as a vendored dependency. The libbpf code is then statically linked into the resulting binary, and there are no runtime dependencies. [Tracee](https://github.com/aquasecurity/tracee) takes this approach; see its [Makefile](https://github.com/aquasecurity/tracee/blob/f8df7da6a27f729610992b6bd52e89d510fcf384/tracee-ebpf/Makefile#L62) for an example.
## Concepts
libbpfgo tries to feel natural for Go developers by abstracting away C technicalities: it translates low-level return codes into Go `error`s, organizes functionality around Go `struct`s, and uses a `channel` to let you consume events.
At a high level, this is a typical workflow for working with the library:
1. Compile your bpf program into an object file.
1. Initialize a `Module` struct - that is a unit of BPF functionality around your compiled object file.
1. Load bpf programs from the object file using the `BPFProg` struct.
1. Attach `BPFProg` to system facilities, for example to "raw tracepoints" or "kprobes" using the `BPFProg`'s associated functions.
1. Instantiate and manipulate BPF maps via the `BPFMap` struct and its associated methods.
1. Instantiate and manipulate a perf buffer for communicating events from your BPF program to the driving userspace program, using the `RingBuffer` struct and its associated objects.
## Example
```go
// initializing
import bpf "github.com/aquasecurity/libbpfgo"
...
bpfModule := bpf.NewModuleFromFile(bpfObjectPath)
bpfModule.BPFLoadObject()
// maps
mymap, _ := bpfModule.GetMap("mymap")
mymap.Update(key, value)
// ring buffer
rb, _ := bpfModule.InitRingBuffer("events", eventsChannel, buffSize)
rb.Start()
e := <-eventsChannel
```
Please check our github milestones for an idea of the project roadmap. The general goal is to fully implement/expose libbpf's API in Go as seamlessly as possible.
## Learn more
- Blog post on [how to build eBPF programs with libbpfgo](https://blog.aquasec.com/libbpf-ebpf-programs)
- The [selftests](./selftest) are small programs that use libbpfgo to verify functionality, they're good examples to look at for usage.
- [tracee-ebpf](https://github.com/aquasecurity/tracee/tree/main/tracee-ebpf) is a robust consumer of this package.
- Feel free to ask questions by creating a new [Discussion](https://github.com/aquasecurity/libbpfgo/discussions) and we'd love to help.

@ -1,8 +0,0 @@
module github.com/aquasecurity/libbpfgo
go 1.16
require (
github.com/stretchr/testify v1.7.0
golang.org/x/sys v0.0.0-20210514084401-e8d321eab015
)

@ -1,13 +0,0 @@
github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
golang.org/x/sys v0.0.0-20210514084401-e8d321eab015 h1:hZR0X1kPW+nwyJ9xRxqZk1vx5RUObAPBdKVvXPDUH/E=
golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=

@ -1,603 +0,0 @@
package helpers
import (
"encoding/binary"
"net"
"strconv"
"strings"
)
// ParseInodeMode parses the `mode` bitmask argument of the `mknod` syscall
// http://man7.org/linux/man-pages/man7/inode.7.html
func ParseInodeMode(mode uint32) string {
var f []string
// File Type
switch {
case mode&0140000 == 0140000:
f = append(f, "S_IFSOCK")
case mode&0120000 == 0120000:
f = append(f, "S_IFLNK")
case mode&0100000 == 0100000:
f = append(f, "S_IFREG")
case mode&060000 == 060000:
f = append(f, "S_IFBLK")
case mode&040000 == 040000:
f = append(f, "S_IFDIR")
case mode&020000 == 020000:
f = append(f, "S_IFCHR")
case mode&010000 == 010000:
f = append(f, "S_IFIFO")
}
// File Mode
// Owner
if mode&00700 == 00700 {
f = append(f, "S_IRWXU")
} else {
if mode&00400 == 00400 {
f = append(f, "S_IRUSR")
}
if mode&00200 == 00200 {
f = append(f, "S_IWUSR")
}
if mode&00100 == 00100 {
f = append(f, "S_IXUSR")
}
}
// Group
if mode&00070 == 00070 {
f = append(f, "S_IRWXG")
} else {
if mode&00040 == 00040 {
f = append(f, "S_IRGRP")
}
if mode&00020 == 00020 {
f = append(f, "S_IWGRP")
}
if mode&00010 == 00010 {
f = append(f, "S_IXGRP")
}
}
// Others
if mode&00007 == 00007 {
f = append(f, "S_IRWXO")
} else {
if mode&00004 == 00004 {
f = append(f, "S_IROTH")
}
if mode&00002 == 00002 {
f = append(f, "S_IWOTH")
}
if mode&00001 == 00001 {
f = append(f, "S_IXOTH")
}
}
return strings.Join(f, "|")
}
// ParseMemProt parses the `prot` bitmask argument of the `mmap` syscall
// http://man7.org/linux/man-pages/man2/mmap.2.html
// https://elixir.bootlin.com/linux/v5.5.3/source/include/uapi/asm-generic/mman-common.h#L10
func ParseMemProt(prot uint32) string {
var f []string
if prot == 0x0 {
f = append(f, "PROT_NONE")
} else {
if prot&0x01 == 0x01 {
f = append(f, "PROT_READ")
}
if prot&0x02 == 0x02 {
f = append(f, "PROT_WRITE")
}
if prot&0x04 == 0x04 {
f = append(f, "PROT_EXEC")
}
}
return strings.Join(f, "|")
}
// ParseOpenFlags parses the `flags` bitmask argument of the `open` syscall
// http://man7.org/linux/man-pages/man2/open.2.html
// https://elixir.bootlin.com/linux/v5.5.3/source/include/uapi/asm-generic/fcntl.h
func ParseOpenFlags(flags uint32) string {
var f []string
//access mode
switch {
case flags&01 == 01:
f = append(f, "O_WRONLY")
case flags&02 == 02:
f = append(f, "O_RDWR")
default:
f = append(f, "O_RDONLY")
}
// file creation and status flags
if flags&0100 == 0100 {
f = append(f, "O_CREAT")
}
if flags&0200 == 0200 {
f = append(f, "O_EXCL")
}
if flags&0400 == 0400 {
f = append(f, "O_NOCTTY")
}
if flags&01000 == 01000 {
f = append(f, "O_TRUNC")
}
if flags&02000 == 02000 {
f = append(f, "O_APPEND")
}
if flags&04000 == 04000 {
f = append(f, "O_NONBLOCK")
}
if flags&04010000 == 04010000 {
f = append(f, "O_SYNC")
}
if flags&020000 == 020000 {
f = append(f, "O_ASYNC")
}
if flags&0100000 == 0100000 {
f = append(f, "O_LARGEFILE")
}
if flags&0200000 == 0200000 {
f = append(f, "O_DIRECTORY")
}
if flags&0400000 == 0400000 {
f = append(f, "O_NOFOLLOW")
}
if flags&02000000 == 02000000 {
f = append(f, "O_CLOEXEC")
}
if flags&040000 == 040000 {
f = append(f, "O_DIRECT")
}
if flags&01000000 == 01000000 {
f = append(f, "O_NOATIME")
}
if flags&010000000 == 010000000 {
f = append(f, "O_PATH")
}
if flags&020000000 == 020000000 {
f = append(f, "O_TMPFILE")
}
return strings.Join(f, "|")
}
// ParseAccessMode parses the mode from the `access` system call
// http://man7.org/linux/man-pages/man2/access.2.html
func ParseAccessMode(mode uint32) string {
var f []string
if mode == 0x0 {
f = append(f, "F_OK")
} else {
if mode&0x04 == 0x04 {
f = append(f, "R_OK")
}
if mode&0x02 == 0x02 {
f = append(f, "W_OK")
}
if mode&0x01 == 0x01 {
f = append(f, "X_OK")
}
}
return strings.Join(f, "|")
}
// ParseExecFlags parses the `flags` bitmask argument of the `execve` syscall
// http://man7.org/linux/man-pages/man2/execveat.2.html
func ParseExecFlags(flags uint32) string {
var f []string
if flags&0x100 == 0x100 {
f = append(f, "AT_EMPTY_PATH")
}
if flags&0x1000 == 0x1000 {
f = append(f, "AT_SYMLINK_NOFOLLOW")
}
if len(f) == 0 {
f = append(f, "0")
}
return strings.Join(f, "|")
}
// ParseCloneFlags parses the `flags` bitmask argument of the `clone` syscall
// https://man7.org/linux/man-pages/man2/clone.2.html
func ParseCloneFlags(flags uint64) string {
var f []string
if flags&0x00000100 == 0x00000100 {
f = append(f, "CLONE_VM")
}
if flags&0x00000200 == 0x00000200 {
f = append(f, "CLONE_FS")
}
if flags&0x00000400 == 0x00000400 {
f = append(f, "CLONE_FILES")
}
if flags&0x00000800 == 0x00000800 {
f = append(f, "CLONE_SIGHAND")
}
if flags&0x00001000 == 0x00001000 {
f = append(f, "CLONE_PIDFD")
}
if flags&0x00002000 == 0x00002000 {
f = append(f, "CLONE_PTRACE")
}
if flags&0x00004000 == 0x00004000 {
f = append(f, "CLONE_VFORK")
}
if flags&0x00008000 == 0x00008000 {
f = append(f, "CLONE_PARENT")
}
if flags&0x00010000 == 0x00010000 {
f = append(f, "CLONE_THREAD")
}
if flags&0x00020000 == 0x00020000 {
f = append(f, "CLONE_NEWNS")
}
if flags&0x00040000 == 0x00040000 {
f = append(f, "CLONE_SYSVSEM")
}
if flags&0x00080000 == 0x00080000 {
f = append(f, "CLONE_SETTLS")
}
if flags&0x00100000 == 0x00100000 {
f = append(f, "CLONE_PARENT_SETTID")
}
if flags&0x00200000 == 0x00200000 {
f = append(f, "CLONE_CHILD_CLEARTID")
}
if flags&0x00400000 == 0x00400000 {
f = append(f, "CLONE_DETACHED")
}
if flags&0x00800000 == 0x00800000 {
f = append(f, "CLONE_UNTRACED")
}
if flags&0x01000000 == 0x01000000 {
f = append(f, "CLONE_CHILD_SETTID")
}
if flags&0x02000000 == 0x02000000 {
f = append(f, "CLONE_NEWCGROUP")
}
if flags&0x04000000 == 0x04000000 {
f = append(f, "CLONE_NEWUTS")
}
if flags&0x08000000 == 0x08000000 {
f = append(f, "CLONE_NEWIPC")
}
if flags&0x10000000 == 0x10000000 {
f = append(f, "CLONE_NEWUSER")
}
if flags&0x20000000 == 0x20000000 {
f = append(f, "CLONE_NEWPID")
}
if flags&0x40000000 == 0x40000000 {
f = append(f, "CLONE_NEWNET")
}
if flags&0x80000000 == 0x80000000 {
f = append(f, "CLONE_IO")
}
if len(f) == 0 {
f = append(f, "0")
}
return strings.Join(f, "|")
}
// ParseSocketType parses the `type` bitmask argument of the `socket` syscall
// http://man7.org/linux/man-pages/man2/socket.2.html
func ParseSocketType(st uint32) string {
var socketTypes = map[uint32]string{
1: "SOCK_STREAM",
2: "SOCK_DGRAM",
3: "SOCK_RAW",
4: "SOCK_RDM",
5: "SOCK_SEQPACKET",
6: "SOCK_DCCP",
10: "SOCK_PACKET",
}
var f []string
if stName, ok := socketTypes[st&0xf]; ok {
f = append(f, stName)
} else {
f = append(f, strconv.Itoa(int(st)))
}
if st&000004000 == 000004000 {
f = append(f, "SOCK_NONBLOCK")
}
if st&002000000 == 002000000 {
f = append(f, "SOCK_CLOEXEC")
}
return strings.Join(f, "|")
}
// ParseSocketDomain parses the `domain` bitmask argument of the `socket` syscall
// http://man7.org/linux/man-pages/man2/socket.2.html
func ParseSocketDomain(sd uint32) string {
var socketDomains = map[uint32]string{
0: "AF_UNSPEC",
1: "AF_UNIX",
2: "AF_INET",
3: "AF_AX25",
4: "AF_IPX",
5: "AF_APPLETALK",
6: "AF_NETROM",
7: "AF_BRIDGE",
8: "AF_ATMPVC",
9: "AF_X25",
10: "AF_INET6",
11: "AF_ROSE",
12: "AF_DECnet",
13: "AF_NETBEUI",
14: "AF_SECURITY",
15: "AF_KEY",
16: "AF_NETLINK",
17: "AF_PACKET",
18: "AF_ASH",
19: "AF_ECONET",
20: "AF_ATMSVC",
21: "AF_RDS",
22: "AF_SNA",
23: "AF_IRDA",
24: "AF_PPPOX",
25: "AF_WANPIPE",
26: "AF_LLC",
27: "AF_IB",
28: "AF_MPLS",
29: "AF_CAN",
30: "AF_TIPC",
31: "AF_BLUETOOTH",
32: "AF_IUCV",
33: "AF_RXRPC",
34: "AF_ISDN",
35: "AF_PHONET",
36: "AF_IEEE802154",
37: "AF_CAIF",
38: "AF_ALG",
39: "AF_NFC",
40: "AF_VSOCK",
41: "AF_KCM",
42: "AF_QIPCRTR",
43: "AF_SMC",
44: "AF_XDP",
}
var res string
if sdName, ok := socketDomains[sd]; ok {
res = sdName
} else {
res = strconv.Itoa(int(sd))
}
return res
}
// ParseUint32IP parses the IP address encoded as a uint32
func ParseUint32IP(in uint32) string {
ip := make(net.IP, net.IPv4len)
binary.BigEndian.PutUint32(ip, in)
return ip.String()
}
// Parse16BytesSliceIP parses an IP address encoded as a 16-byte slice
// It would be more correct to accept a [16]byte instead of a variable-length slice, but that would cause unnecessary memory copying and type conversions
func Parse16BytesSliceIP(in []byte) string {
ip := net.IP(in)
return ip.String()
}
// ParseCapability parses the `capability` bitmask argument of the `cap_capable` function
// include/uapi/linux/capability.h
func ParseCapability(cap int32) string {
var capabilities = map[int32]string{
0: "CAP_CHOWN",
1: "CAP_DAC_OVERRIDE",
2: "CAP_DAC_READ_SEARCH",
3: "CAP_FOWNER",
4: "CAP_FSETID",
5: "CAP_KILL",
6: "CAP_SETGID",
7: "CAP_SETUID",
8: "CAP_SETPCAP",
9: "CAP_LINUX_IMMUTABLE",
10: "CAP_NET_BIND_SERVICE",
11: "CAP_NET_BROADCAST",
12: "CAP_NET_ADMIN",
13: "CAP_NET_RAW",
14: "CAP_IPC_LOCK",
15: "CAP_IPC_OWNER",
16: "CAP_SYS_MODULE",
17: "CAP_SYS_RAWIO",
18: "CAP_SYS_CHROOT",
19: "CAP_SYS_PTRACE",
20: "CAP_SYS_PACCT",
21: "CAP_SYS_ADMIN",
22: "CAP_SYS_BOOT",
23: "CAP_SYS_NICE",
24: "CAP_SYS_RESOURCE",
25: "CAP_SYS_TIME",
26: "CAP_SYS_TTY_CONFIG",
27: "CAP_MKNOD",
28: "CAP_LEASE",
29: "CAP_AUDIT_WRITE",
30: "CAP_AUDIT_CONTROL",
31: "CAP_SETFCAP",
32: "CAP_MAC_OVERRIDE",
33: "CAP_MAC_ADMIN",
34: "CAP_SYSLOG",
35: "CAP_WAKE_ALARM",
36: "CAP_BLOCK_SUSPEND",
37: "CAP_AUDIT_READ",
}
var res string
if capName, ok := capabilities[cap]; ok {
res = capName
} else {
res = strconv.Itoa(int(cap))
}
return res
}
// ParsePrctlOption parses the `option` argument of the `prctl` syscall
// http://man7.org/linux/man-pages/man2/prctl.2.html
func ParsePrctlOption(op int32) string {
var prctlOptions = map[int32]string{
1: "PR_SET_PDEATHSIG",
2: "PR_GET_PDEATHSIG",
3: "PR_GET_DUMPABLE",
4: "PR_SET_DUMPABLE",
5: "PR_GET_UNALIGN",
6: "PR_SET_UNALIGN",
7: "PR_GET_KEEPCAPS",
8: "PR_SET_KEEPCAPS",
9: "PR_GET_FPEMU",
10: "PR_SET_FPEMU",
11: "PR_GET_FPEXC",
12: "PR_SET_FPEXC",
13: "PR_GET_TIMING",
14: "PR_SET_TIMING",
15: "PR_SET_NAME",
16: "PR_GET_NAME",
19: "PR_GET_ENDIAN",
20: "PR_SET_ENDIAN",
21: "PR_GET_SECCOMP",
22: "PR_SET_SECCOMP",
23: "PR_CAPBSET_READ",
24: "PR_CAPBSET_DROP",
25: "PR_GET_TSC",
26: "PR_SET_TSC",
27: "PR_GET_SECUREBITS",
28: "PR_SET_SECUREBITS",
29: "PR_SET_TIMERSLACK",
30: "PR_GET_TIMERSLACK",
31: "PR_TASK_PERF_EVENTS_DISABLE",
32: "PR_TASK_PERF_EVENTS_ENABLE",
33: "PR_MCE_KILL",
34: "PR_MCE_KILL_GET",
35: "PR_SET_MM",
36: "PR_SET_CHILD_SUBREAPER",
37: "PR_GET_CHILD_SUBREAPER",
38: "PR_SET_NO_NEW_PRIVS",
39: "PR_GET_NO_NEW_PRIVS",
40: "PR_GET_TID_ADDRESS",
41: "PR_SET_THP_DISABLE",
42: "PR_GET_THP_DISABLE",
43: "PR_MPX_ENABLE_MANAGEMENT",
44: "PR_MPX_DISABLE_MANAGEMENT",
45: "PR_SET_FP_MODE",
46: "PR_GET_FP_MODE",
47: "PR_CAP_AMBIENT",
50: "PR_SVE_SET_VL",
51: "PR_SVE_GET_VL",
52: "PR_GET_SPECULATION_CTRL",
53: "PR_SET_SPECULATION_CTRL",
54: "PR_PAC_RESET_KEYS",
55: "PR_SET_TAGGED_ADDR_CTRL",
56: "PR_GET_TAGGED_ADDR_CTRL",
}
var res string
if opName, ok := prctlOptions[op]; ok {
res = opName
} else {
res = strconv.Itoa(int(op))
}
return res
}
// ParsePtraceRequest parses the `request` argument of the `ptrace` syscall
// http://man7.org/linux/man-pages/man2/ptrace.2.html
func ParsePtraceRequest(req int64) string {
var ptraceRequest = map[int64]string{
0: "PTRACE_TRACEME",
1: "PTRACE_PEEKTEXT",
2: "PTRACE_PEEKDATA",
3: "PTRACE_PEEKUSER",
4: "PTRACE_POKETEXT",
5: "PTRACE_POKEDATA",
6: "PTRACE_POKEUSER",
7: "PTRACE_CONT",
8: "PTRACE_KILL",
9: "PTRACE_SINGLESTEP",
12: "PTRACE_GETREGS",
13: "PTRACE_SETREGS",
14: "PTRACE_GETFPREGS",
15: "PTRACE_SETFPREGS",
16: "PTRACE_ATTACH",
17: "PTRACE_DETACH",
18: "PTRACE_GETFPXREGS",
19: "PTRACE_SETFPXREGS",
24: "PTRACE_SYSCALL",
0x4200: "PTRACE_SETOPTIONS",
0x4201: "PTRACE_GETEVENTMSG",
0x4202: "PTRACE_GETSIGINFO",
0x4203: "PTRACE_SETSIGINFO",
0x4204: "PTRACE_GETREGSET",
0x4205: "PTRACE_SETREGSET",
0x4206: "PTRACE_SEIZE",
0x4207: "PTRACE_INTERRUPT",
0x4208: "PTRACE_LISTEN",
0x4209: "PTRACE_PEEKSIGINFO",
0x420a: "PTRACE_GETSIGMASK",
0x420b: "PTRACE_SETSIGMASK",
0x420c: "PTRACE_SECCOMP_GET_FILTER",
0x420d: "PTRACE_SECCOMP_GET_METADATA",
}
var res string
if reqName, ok := ptraceRequest[req]; ok {
res = reqName
} else {
res = strconv.Itoa(int(req))
}
return res
}
// ParseBPFCmd parses the `cmd` argument of the `bpf` syscall
// https://man7.org/linux/man-pages/man2/bpf.2.html
func ParseBPFCmd(cmd int32) string {
var bpfCmd = map[int32]string{
0: "BPF_MAP_CREATE",
1: "BPF_MAP_LOOKUP_ELEM",
2: "BPF_MAP_UPDATE_ELEM",
3: "BPF_MAP_DELETE_ELEM",
4: "BPF_MAP_GET_NEXT_KEY",
5: "BPF_PROG_LOAD",
6: "BPF_OBJ_PIN",
7: "BPF_OBJ_GET",
8: "BPF_PROG_ATTACH",
9: "BPF_PROG_DETACH",
10: "BPF_PROG_TEST_RUN",
11: "BPF_PROG_GET_NEXT_ID",
12: "BPF_MAP_GET_NEXT_ID",
13: "BPF_PROG_GET_FD_BY_ID",
14: "BPF_MAP_GET_FD_BY_ID",
15: "BPF_OBJ_GET_INFO_BY_FD",
16: "BPF_PROG_QUERY",
17: "BPF_RAW_TRACEPOINT_OPEN",
18: "BPF_BTF_LOAD",
19: "BPF_BTF_GET_FD_BY_ID",
20: "BPF_TASK_FD_QUERY",
21: "BPF_MAP_LOOKUP_AND_DELETE_ELEM",
22: "BPF_MAP_FREEZE",
23: "BPF_BTF_GET_NEXT_ID",
24: "BPF_MAP_LOOKUP_BATCH",
25: "BPF_MAP_LOOKUP_AND_DELETE_BATCH",
26: "BPF_MAP_UPDATE_BATCH",
27: "BPF_MAP_DELETE_BATCH",
28: "BPF_LINK_CREATE",
29: "BPF_LINK_UPDATE",
30: "BPF_LINK_GET_FD_BY_ID",
31: "BPF_LINK_GET_NEXT_ID",
32: "BPF_ENABLE_STATS",
33: "BPF_ITER_CREATE",
34: "BPF_LINK_DETACH",
}
var res string
if cmdName, ok := bpfCmd[cmd]; ok {
res = cmdName
} else {
res = strconv.Itoa(int(cmd))
}
return res
}
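
The argument parsers above (removed along with the vendored libbpfgo copy) turn raw syscall argument bitmasks into readable flag strings. A small, hedged usage example against the upstream helpers package, with made-up input values:

```go
package main

import (
	"fmt"

	"github.com/aquasecurity/libbpfgo/helpers"
)

func main() {
	// PROT_READ (0x1) | PROT_EXEC (0x4) from an mmap prot argument.
	fmt.Println(helpers.ParseMemProt(0x5)) // PROT_READ|PROT_EXEC

	// CLONE_VM (0x100) | CLONE_THREAD (0x10000) from a clone flags argument.
	fmt.Println(helpers.ParseCloneFlags(0x10100)) // CLONE_VM|CLONE_THREAD
}
```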

@ -1,52 +0,0 @@
package helpers
import (
"debug/elf"
"errors"
"fmt"
)
// SymbolToOffset attempts to resolve a 'symbol' name in the binary found at
// 'path' to an offset. The offset can be used for attaching a u(ret)probe
func SymbolToOffset(path, symbol string) (uint32, error) {
f, err := elf.Open(path)
if err != nil {
return 0, fmt.Errorf("could not open elf file to resolve symbol offset: %v", err)
}
syms, err := f.Symbols()
if err != nil {
return 0, fmt.Errorf("could not open symbol section to resolve symbol offset: %v", err)
}
sectionsToSearchForSymbol := []*elf.Section{}
for i := range f.Sections {
if f.Sections[i].Flags == elf.SHF_ALLOC+elf.SHF_EXECINSTR {
sectionsToSearchForSymbol = append(sectionsToSearchForSymbol, f.Sections[i])
}
}
var executableSection *elf.Section
for i := range syms {
if syms[i].Name == symbol {
// Find what section the symbol is in by checking the executable section's
// addr space.
for m := range sectionsToSearchForSymbol {
if syms[i].Value > sectionsToSearchForSymbol[m].Addr &&
syms[i].Value < sectionsToSearchForSymbol[m].Addr+sectionsToSearchForSymbol[m].Size {
executableSection = sectionsToSearchForSymbol[m]
}
}
if executableSection == nil {
return 0, errors.New("could not find symbol in executable sections of binary")
}
return uint32(syms[i].Value - executableSection.Addr + executableSection.Offset), nil
}
}
return 0, fmt.Errorf("symbol not found")
}
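
Delve previously called this helper (via its `ebpf.SymbolToOffset` wrapper, also removed in this commit) to turn a function name into a file offset suitable for uprobe attachment. A hedged example; the binary path and symbol are placeholders:

```go
package main

import (
	"fmt"
	"log"

	"github.com/aquasecurity/libbpfgo/helpers"
)

func main() {
	// Any unstripped ELF binary with a symbol table will do.
	off, err := helpers.SymbolToOffset("/usr/local/bin/dlv", "main.main")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("uprobe file offset: %#x\n", off)
}
```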

@ -1,228 +0,0 @@
package helpers
import (
"bufio"
"bytes"
"compress/gzip"
"errors"
"fmt"
"io"
"os"
"strings"
"golang.org/x/sys/unix"
)
// These constants are a limited number of the total kernel config options,
// but are provided because they are most relevant for BPF
// development.
const (
CONFIG_BPF uint32 = iota + 1
CONFIG_BPF_SYSCALL
CONFIG_HAVE_EBPF_JIT
CONFIG_BPF_JIT
CONFIG_BPF_JIT_ALWAYS_ON
CONFIG_CGROUPS
CONFIG_CGROUP_BPF
CONFIG_CGROUP_NET_CLASSID
CONFIG_SOCK_CGROUP_DATA
CONFIG_BPF_EVENTS
CONFIG_KPROBE_EVENTS
CONFIG_UPROBE_EVENTS
CONFIG_TRACING
CONFIG_FTRACE_SYSCALLS
CONFIG_FUNCTION_ERROR_INJECTION
CONFIG_BPF_KPROBE_OVERRIDE
CONFIG_NET
CONFIG_XDP_SOCKETS
CONFIG_LWTUNNEL_BPF
CONFIG_NET_ACT_BPF
CONFIG_NET_CLS_BPF
CONFIG_NET_CLS_ACT
CONFIG_NET_SCH_INGRESS
CONFIG_XFRM
CONFIG_IP_ROUTE_CLASSID
CONFIG_IPV6_SEG6_BPF
CONFIG_BPF_LIRC_MODE2
CONFIG_BPF_STREAM_PARSER
CONFIG_NETFILTER_XT_MATCH_BPF
CONFIG_BPFILTER
CONFIG_BPFILTER_UMH
CONFIG_TEST_BPF
CONFIG_HZ
CONFIG_DEBUG_INFO_BTF
CONFIG_DEBUG_INFO_BTF_MODULES
CONFIG_BPF_LSM
CONFIG_BPF_PRELOAD
CONFIG_BPF_PRELOAD_UMD
)
var KernelConfigKeyStringToID map[string]uint32 = map[string]uint32{
"CONFIG_BPF": CONFIG_BPF,
"CONFIG_BPF_SYSCALL": CONFIG_BPF_SYSCALL,
"CONFIG_HAVE_EBPF_JIT": CONFIG_HAVE_EBPF_JIT,
"CONFIG_BPF_JIT": CONFIG_BPF_JIT,
"CONFIG_BPF_JIT_ALWAYS_ON": CONFIG_BPF_JIT_ALWAYS_ON,
"CONFIG_CGROUPS": CONFIG_CGROUPS,
"CONFIG_CGROUP_BPF": CONFIG_CGROUP_BPF,
"CONFIG_CGROUP_NET_CLASSID": CONFIG_CGROUP_NET_CLASSID,
"CONFIG_SOCK_CGROUP_DATA": CONFIG_SOCK_CGROUP_DATA,
"CONFIG_BPF_EVENTS": CONFIG_BPF_EVENTS,
"CONFIG_KPROBE_EVENTS": CONFIG_KPROBE_EVENTS,
"CONFIG_UPROBE_EVENTS": CONFIG_UPROBE_EVENTS,
"CONFIG_TRACING": CONFIG_TRACING,
"CONFIG_FTRACE_SYSCALLS": CONFIG_FTRACE_SYSCALLS,
"CONFIG_FUNCTION_ERROR_INJECTION": CONFIG_FUNCTION_ERROR_INJECTION,
"CONFIG_BPF_KPROBE_OVERRIDE": CONFIG_BPF_KPROBE_OVERRIDE,
"CONFIG_NET": CONFIG_NET,
"CONFIG_XDP_SOCKETS": CONFIG_XDP_SOCKETS,
"CONFIG_LWTUNNEL_BPF": CONFIG_LWTUNNEL_BPF,
"CONFIG_NET_ACT_BPF": CONFIG_NET_ACT_BPF,
"CONFIG_NET_CLS_BPF": CONFIG_NET_CLS_BPF,
"CONFIG_NET_CLS_ACT": CONFIG_NET_CLS_ACT,
"CONFIG_NET_SCH_INGRESS": CONFIG_NET_SCH_INGRESS,
"CONFIG_XFRM": CONFIG_XFRM,
"CONFIG_IP_ROUTE_CLASSID": CONFIG_IP_ROUTE_CLASSID,
"CONFIG_IPV6_SEG6_BPF": CONFIG_IPV6_SEG6_BPF,
"CONFIG_BPF_LIRC_MODE2": CONFIG_BPF_LIRC_MODE2,
"CONFIG_BPF_STREAM_PARSER": CONFIG_BPF_STREAM_PARSER,
"CONFIG_NETFILTER_XT_MATCH_BPF": CONFIG_NETFILTER_XT_MATCH_BPF,
"CONFIG_BPFILTER": CONFIG_BPFILTER,
"CONFIG_BPFILTER_UMH": CONFIG_BPFILTER_UMH,
"CONFIG_TEST_BPF": CONFIG_TEST_BPF,
"CONFIG_HZ": CONFIG_HZ,
"CONFIG_DEBUG_INFO_BTF": CONFIG_DEBUG_INFO_BTF,
"CONFIG_DEBUG_INFO_BTF_MODULES": CONFIG_DEBUG_INFO_BTF_MODULES,
"CONFIG_BPF_LSM": CONFIG_BPF_LSM,
"CONFIG_BPF_PRELOAD": CONFIG_BPF_PRELOAD,
"CONFIG_BPF_PRELOAD_UMD": CONFIG_BPF_PRELOAD_UMD,
}
var KernelConfigKeyIDToString map[uint32]string = map[uint32]string{
CONFIG_BPF: "CONFIG_BPF",
CONFIG_BPF_SYSCALL: "CONFIG_BPF_SYSCALL",
CONFIG_HAVE_EBPF_JIT: "CONFIG_HAVE_EBPF_JIT",
CONFIG_BPF_JIT: "CONFIG_BPF_JIT",
CONFIG_BPF_JIT_ALWAYS_ON: "CONFIG_BPF_JIT_ALWAYS_ON",
CONFIG_CGROUPS: "CONFIG_CGROUPS",
CONFIG_CGROUP_BPF: "CONFIG_CGROUP_BPF",
CONFIG_CGROUP_NET_CLASSID: "CONFIG_CGROUP_NET_CLASSID",
CONFIG_SOCK_CGROUP_DATA: "CONFIG_SOCK_CGROUP_DATA",
CONFIG_BPF_EVENTS: "CONFIG_BPF_EVENTS",
CONFIG_KPROBE_EVENTS: "CONFIG_KPROBE_EVENTS",
CONFIG_UPROBE_EVENTS: "CONFIG_UPROBE_EVENTS",
CONFIG_TRACING: "CONFIG_TRACING",
CONFIG_FTRACE_SYSCALLS: "CONFIG_FTRACE_SYSCALLS",
CONFIG_FUNCTION_ERROR_INJECTION: "CONFIG_FUNCTION_ERROR_INJECTION",
CONFIG_BPF_KPROBE_OVERRIDE: "CONFIG_BPF_KPROBE_OVERRIDE",
CONFIG_NET: "CONFIG_NET",
CONFIG_XDP_SOCKETS: "CONFIG_XDP_SOCKETS",
CONFIG_LWTUNNEL_BPF: "CONFIG_LWTUNNEL_BPF",
CONFIG_NET_ACT_BPF: "CONFIG_NET_ACT_BPF",
CONFIG_NET_CLS_BPF: "CONFIG_NET_CLS_BPF",
CONFIG_NET_CLS_ACT: "CONFIG_NET_CLS_ACT",
CONFIG_NET_SCH_INGRESS: "CONFIG_NET_SCH_INGRESS",
CONFIG_XFRM: "CONFIG_XFRM",
CONFIG_IP_ROUTE_CLASSID: "CONFIG_IP_ROUTE_CLASSID",
CONFIG_IPV6_SEG6_BPF: "CONFIG_IPV6_SEG6_BPF",
CONFIG_BPF_LIRC_MODE2: "CONFIG_BPF_LIRC_MODE2",
CONFIG_BPF_STREAM_PARSER: "CONFIG_BPF_STREAM_PARSER",
CONFIG_NETFILTER_XT_MATCH_BPF: "CONFIG_NETFILTER_XT_MATCH_BPF",
CONFIG_BPFILTER: "CONFIG_BPFILTER",
CONFIG_BPFILTER_UMH: "CONFIG_BPFILTER_UMH",
CONFIG_TEST_BPF: "CONFIG_TEST_BPF",
CONFIG_HZ: "CONFIG_HZ",
CONFIG_DEBUG_INFO_BTF: "CONFIG_DEBUG_INFO_BTF",
CONFIG_DEBUG_INFO_BTF_MODULES: "CONFIG_DEBUG_INFO_BTF_MODULES",
CONFIG_BPF_LSM: "CONFIG_BPF_LSM",
CONFIG_BPF_PRELOAD: "CONFIG_BPF_PRELOAD",
CONFIG_BPF_PRELOAD_UMD: "CONFIG_BPF_PRELOAD_UMD",
}
type KernelConfig map[uint32]string
// InitKernelConfig populates the passed KernelConfig
// by attempting to read the kernel config from:
// /boot/config-$(uname -r)
// or, failing that,
// /proc/config.gz
func (k KernelConfig) InitKernelConfig() error {
x := unix.Utsname{}
err := unix.Uname(&x)
if err != nil {
return fmt.Errorf("could not determine uname release: %v", err)
}
bootConfigPath := fmt.Sprintf("/boot/config-%s", bytes.Trim(x.Release[:], "\x00"))
err = k.getBootConfigByPath(bootConfigPath)
if err == nil {
return nil
}
err2 := k.getProcGZConfigByPath("/proc/config.gz")
if err2 != nil {
return fmt.Errorf("%v %v", err, err2)
}
return nil
}
// GetKernelConfigValue retrieves a value from the kernel config.
// If the config value does not exist, an error is returned.
func (k KernelConfig) GetKernelConfigValue(key uint32) (string, error) {
v, exists := k[key]
if !exists {
return "", errors.New("kernel config value does not exist, it's possible this option is not present in your kernel version or the KernelConfig has not been initialized")
}
return v, nil
}
func (k KernelConfig) getBootConfigByPath(bootConfigPath string) error {
configFile, err := os.Open(bootConfigPath)
if err != nil {
return fmt.Errorf("could not open %s: %v", bootConfigPath, err)
}
defer configFile.Close()
k.readConfigFromScanner(configFile)
return nil
}
func (k KernelConfig) getProcGZConfigByPath(procConfigPath string) error {
configFile, err := os.Open(procConfigPath)
if err != nil {
return fmt.Errorf("could not open %s: %v", procConfigPath, err)
}
defer configFile.Close()
return k.getProcGZConfig(configFile)
}
func (k KernelConfig) getProcGZConfig(reader io.Reader) error {
zreader, err := gzip.NewReader(reader)
if err != nil {
return err
}
defer zreader.Close()
k.readConfigFromScanner(zreader)
return nil
}
func (k KernelConfig) readConfigFromScanner(reader io.Reader) {
scanner := bufio.NewScanner(reader)
for scanner.Scan() {
kv := strings.Split(scanner.Text(), "=")
if len(kv) != 2 {
continue
}
configKeyID := KernelConfigKeyStringToID[kv[0]]
if configKeyID == 0 {
continue
}
k[configKeyID] = kv[1]
}
}
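A minimal usage sketch of the KernelConfig helpers above. The import path and the choice of key are illustrative; error handling is shortened.

package main

import (
	"fmt"
	"log"

	"github.com/aquasecurity/libbpfgo/helpers" // assumed import path for these helpers
)

func main() {
	cfg := helpers.KernelConfig{} // KernelConfig is a map type, so the empty literal is ready to use
	if err := cfg.InitKernelConfig(); err != nil {
		log.Fatalf("could not read kernel config: %v", err)
	}
	val, err := cfg.GetKernelConfigValue(helpers.CONFIG_BPF)
	if err != nil {
		log.Fatalf("CONFIG_BPF not found: %v", err)
	}
	fmt.Println("CONFIG_BPF =", val) // usually "y" on BPF-capable kernels
}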

@ -1,74 +0,0 @@
package helpers
import (
"sync"
)
type slot struct {
value interface{}
used bool
}
// RWArray allows for multiple concurrent readers but
// only a single writer. The writers lock a mutex while the readers
// are lock free.
// It is implemented as an array of slots where each slot holds a
// value (of type interface{}) and a boolean marker to indicate if it's
// in use or not. The insertion (Put) performs a linear probe
// looking for an available slot as indicated by the in-use marker.
// While probing, Put does not touch the value itself, since readers
// may be reading it concurrently without a lock.
type RWArray struct {
slots []slot
mux sync.Mutex
}
func NewRWArray(capacity uint) RWArray {
return RWArray{
slots: make([]slot, capacity),
}
}
func (a *RWArray) Put(v interface{}) int {
a.mux.Lock()
defer a.mux.Unlock()
limit := len(a.slots)
for i := 0; i < limit; i++ {
if !a.slots[i].used {
a.slots[i].value = v
a.slots[i].used = true
return i
}
}
return -1
}
func (a *RWArray) Remove(index uint) {
a.mux.Lock()
defer a.mux.Unlock()
if int(index) >= len(a.slots) {
return
}
a.slots[index].value = nil
a.slots[index].used = false
}
func (a *RWArray) Get(index uint) interface{} {
if int(index) >= len(a.slots) {
return nil
}
// N.B. If slot[index].used == false, this is technically
// a race since Put() might be putting the value in there
// at the same time.
return a.slots[index].value
}
func (a *RWArray) Capacity() uint {
return uint(len(a.slots))
}
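A short sketch of the intended Put/Get/Remove round-trip, assuming the same helpers package as above (import path assumed):

package main

import (
	"fmt"

	"github.com/aquasecurity/libbpfgo/helpers" // assumed import path
)

func main() {
	arr := helpers.NewRWArray(8)
	idx := arr.Put("some value") // returns -1 once every slot is in use
	if idx < 0 {
		panic("RWArray is full")
	}
	fmt.Println(arr.Get(uint(idx))) // readers may call Get concurrently without locking
	arr.Remove(uint(idx))
}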

@ -1,33 +0,0 @@
package helpers
import (
"bufio"
"fmt"
"os"
)
// TracePipeListen reads data from the trace pipe that bpf_trace_printk() writes to,
// (/sys/kernel/debug/tracing/trace_pipe).
// It writes the data to stdout. The pipe is global, so this function is not
// associated with any BPF program. It is recommended to use bpf_trace_printk()
// and this function for debug purposes only.
// This is a blocking function intended to be called from a goroutine.
func TracePipeListen() error {
f, err := os.Open("/sys/kernel/debug/tracing/trace_pipe")
if err != nil {
return fmt.Errorf("failed to open trace pipe: %v", err)
}
defer f.Close()
r := bufio.NewReader(f)
b := make([]byte, 1024)
for {
n, err := r.Read(b)
if err != nil {
return fmt.Errorf("failed to read from trace pipe: %v", err)
}
s := string(b[:n])
fmt.Println(s)
}
}

@ -1,30 +0,0 @@
package libbpfgo
import (
"C"
"unsafe"
)
// This callback definition needs to live in a different file from the one where it is declared in C.
// Otherwise a multiple-definition compilation error will occur.
//export perfCallback
func perfCallback(ctx unsafe.Pointer, cpu C.int, data unsafe.Pointer, size C.int) {
pb := eventChannels.Get(uint(uintptr(ctx))).(*PerfBuffer)
pb.eventsChan <- C.GoBytes(data, size)
}
//export perfLostCallback
func perfLostCallback(ctx unsafe.Pointer, cpu C.int, cnt C.ulonglong) {
pb := eventChannels.Get(uint(uintptr(ctx))).(*PerfBuffer)
if pb.lostChan != nil {
pb.lostChan <- uint64(cnt)
}
}
//export ringbufferCallback
func ringbufferCallback(ctx unsafe.Pointer, data unsafe.Pointer, size C.int) C.int {
ch := eventChannels.Get(uint(uintptr(ctx))).(chan []byte)
ch <- C.GoBytes(data, size)
return C.int(0)
}

File diff suppressed because it is too large

17
vendor/github.com/cilium/ebpf/.clang-format generated vendored Normal file

@ -0,0 +1,17 @@
---
Language: Cpp
BasedOnStyle: LLVM
AlignAfterOpenBracket: DontAlign
AlignConsecutiveAssignments: true
AlignEscapedNewlines: DontAlign
AlwaysBreakBeforeMultilineStrings: true
AlwaysBreakTemplateDeclarations: false
AllowAllParametersOfDeclarationOnNextLine: false
AllowShortFunctionsOnASingleLine: false
BreakBeforeBraces: Attach
IndentWidth: 4
KeepEmptyLinesAtTheStartOfBlocks: false
TabWidth: 4
UseTab: ForContinuationAndIndentation
ColumnLimit: 1000
...

14
vendor/github.com/cilium/ebpf/.gitignore generated vendored Normal file

@ -0,0 +1,14 @@
# Binaries for programs and plugins
*.exe
*.exe~
*.dll
*.so
*.dylib
*.o
!*_bpf*.o
# Test binary, build with `go test -c`
*.test
# Output of the go coverage tool, specifically when used with LiteIDE
*.out

28
vendor/github.com/cilium/ebpf/.golangci.yaml generated vendored Normal file

@ -0,0 +1,28 @@
---
issues:
exclude-rules:
# syscall param structs will have unused fields in Go code.
- path: syscall.*.go
linters:
- structcheck
linters:
disable-all: true
enable:
- deadcode
- errcheck
- goimports
- gosimple
- govet
- ineffassign
- misspell
- staticcheck
- structcheck
- typecheck
- unused
- varcheck
# Could be enabled later:
# - gocyclo
# - maligned
# - gosec

80
vendor/github.com/cilium/ebpf/ARCHITECTURE.md generated vendored Normal file

@ -0,0 +1,80 @@
Architecture of the library
===
ELF -> Specifications -> Objects -> Links
ELF
---
BPF is usually produced by using Clang to compile a subset of C. Clang outputs
an ELF file which contains program byte code (aka BPF), but also metadata for
maps used by the program. The metadata follows the conventions set by libbpf
shipped with the kernel. Certain ELF sections have special meaning
and contain structures defined by libbpf. Newer versions of clang emit
additional metadata in BPF Type Format (aka BTF).
The library aims to be compatible with libbpf so that moving from a C toolchain
to a Go one creates little friction. To that end, the [ELF reader](elf_reader.go)
is tested against the Linux selftests and avoids introducing custom behaviour
if possible.
The output of the ELF reader is a `CollectionSpec` which encodes
all of the information contained in the ELF in a form that is easy to work with
in Go.
### BTF
The BPF Type Format describes more than just the types used by a BPF program. It
includes debug aids like which source line corresponds to which instruction and
what global variables are used.
[BTF parsing](internal/btf/) lives in a separate internal package since exposing
it would mean an additional maintenance burden, and because the API still
has sharp corners. The most important concept is the `btf.Type` interface, which
also describes things that aren't really types like `.rodata` or `.bss` sections.
`btf.Type`s can form cyclical graphs, which can easily lead to infinite loops if
one is not careful. Hopefully a safe pattern to work with `btf.Type` emerges as
we write more code that deals with it.
Specifications
---
`CollectionSpec`, `ProgramSpec` and `MapSpec` are blueprints for in-kernel
objects and contain everything necessary to execute the relevant `bpf(2)`
syscalls. Since the ELF reader outputs a `CollectionSpec` it's possible to
modify clang-compiled BPF code, for example to rewrite constants. At the same
time the [asm](asm/) package provides an assembler that can be used to generate
`ProgramSpec` on the fly.
Creating a spec should never require any privileges or be restricted in any way,
for example by only allowing programs in native endianness. This ensures that
the library stays flexible.
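For instance, a minimal `ProgramSpec` can be assembled by hand (a sketch only; the package name, function name and field values below are illustrative, not taken from the library's examples):

```go
package example

import (
	"github.com/cilium/ebpf"
	"github.com/cilium/ebpf/asm"
)

// exampleSpec builds a trivial socket filter that immediately returns 0.
func exampleSpec() *ebpf.ProgramSpec {
	return &ebpf.ProgramSpec{
		Name: "example",
		Type: ebpf.SocketFilter,
		Instructions: asm.Instructions{
			asm.Mov.Imm(asm.R0, 0), // r0 = 0
			asm.Return(),           // return r0
		},
		License: "MIT",
	}
}
```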
Objects
---
`Program` and `Map` are the result of loading specs into the kernel. Sometimes
loading a spec will fail because the kernel is too old, or a feature is not
enabled. There are multiple ways the library deals with that:
* Fallback: older kernels don't allow naming programs and maps. The library
automatically detects support for names, and omits them during load if
necessary. This works since name is primarily a debug aid.
* Sentinel error: sometimes it's possible to detect that a feature isn't available.
In that case the library will return an error wrapping `ErrNotSupported`.
This is also useful to skip tests that can't run on the current kernel.
Once program and map objects are loaded they expose the kernel's low-level API,
e.g. `NextKey`. Often this API is awkward to use in Go, so there are safer
wrappers on top of the low-level API, like `MapIterator`. The low-level API is
useful when our higher-level API doesn't support a particular use case.
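A sketch of both patterns, assuming a map with `uint32` keys and values (error handling abbreviated):

```go
package example

import (
	"errors"

	"github.com/cilium/ebpf"
)

func useObjects(spec *ebpf.ProgramSpec, m *ebpf.Map) error {
	prog, err := ebpf.NewProgram(spec)
	if errors.Is(err, ebpf.ErrNotSupported) {
		// The running kernel lacks a required feature; fall back or skip.
		return err
	}
	if err != nil {
		return err
	}
	defer prog.Close()

	// MapIterator wraps the low-level NextKey/Lookup loop.
	var key, value uint32
	iter := m.Iterate()
	for iter.Next(&key, &value) {
		// use key and value here
	}
	return iter.Err()
}
```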
Links
---
BPF can be attached to many different points in the kernel and newer BPF hooks
tend to use bpf_link to do so. Older hooks unfortunately use a combination of
syscalls, netlink messages, etc. Adding support for a new link type should not
pull in large dependencies like netlink, so XDP programs or tracepoints are
out of scope.

46
vendor/github.com/cilium/ebpf/CODE_OF_CONDUCT.md generated vendored Normal file

@ -0,0 +1,46 @@
# Contributor Covenant Code of Conduct
## Our Pledge
In the interest of fostering an open and welcoming environment, we as contributors and maintainers pledge to making participation in our project and our community a harassment-free experience for everyone, regardless of age, body size, disability, ethnicity, gender identity and expression, level of experience, nationality, personal appearance, race, religion, or sexual identity and orientation.
## Our Standards
Examples of behavior that contributes to creating a positive environment include:
* Using welcoming and inclusive language
* Being respectful of differing viewpoints and experiences
* Gracefully accepting constructive criticism
* Focusing on what is best for the community
* Showing empathy towards other community members
Examples of unacceptable behavior by participants include:
* The use of sexualized language or imagery and unwelcome sexual attention or advances
* Trolling, insulting/derogatory comments, and personal or political attacks
* Public or private harassment
* Publishing others' private information, such as a physical or electronic address, without explicit permission
* Other conduct which could reasonably be considered inappropriate in a professional setting
## Our Responsibilities
Project maintainers are responsible for clarifying the standards of acceptable behavior and are expected to take appropriate and fair corrective action in response to any instances of unacceptable behavior.
Project maintainers have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors that they deem inappropriate, threatening, offensive, or harmful.
## Scope
This Code of Conduct applies both within project spaces and in public spaces when an individual is representing the project or its community. Examples of representing a project or community include using an official project e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event. Representation of a project may be further defined and clarified by project maintainers.
## Enforcement
Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting the project team at nathanjsweet at gmail dot com or i at lmb dot io. The project team will review and investigate all complaints, and will respond in a way that it deems appropriate to the circumstances. The project team is obligated to maintain confidentiality with regard to the reporter of an incident. Further details of specific enforcement policies may be posted separately.
Project maintainers who do not follow or enforce the Code of Conduct in good faith may face temporary or permanent repercussions as determined by other members of the project's leadership.
## Attribution
This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, available at [http://contributor-covenant.org/version/1/4][version]
[homepage]: http://contributor-covenant.org
[version]: http://contributor-covenant.org/version/1/4/

40
vendor/github.com/cilium/ebpf/CONTRIBUTING.md generated vendored Normal file

@ -0,0 +1,40 @@
# How to contribute
Development is on [GitHub](https://github.com/cilium/ebpf) and contributions in
the form of pull requests and issues reporting bugs or suggesting new features
are welcome. Please take a look at [the architecture](ARCHITECTURE.md) to get
a better understanding for the high-level goals.
New features must be accompanied by tests. Before starting work on any large
feature, please [join](https://ebpf.io/slack) the
[#ebpf-go](https://cilium.slack.com/messages/ebpf-go) channel on Slack to
discuss the design first.
When submitting pull requests, consider writing details about what problem you
are solving and why the proposed approach solves that problem in commit messages
and/or pull request description to help future library users and maintainers to
reason about the proposed changes.
## Running the tests
Many of the tests require privileges to set resource limits and load eBPF code.
The easiest way to obtain these is to run the tests with `sudo`.
To test the current package with your local kernel you can simply run:
```
go test -exec sudo ./...
```
To test the current package with a different kernel version you can use the [run-tests.sh](run-tests.sh) script.
It requires [virtme](https://github.com/amluto/virtme) and qemu to be installed.
Examples:
```bash
# Run all tests on a 5.4 kernel
./run-tests.sh 5.4
# Run a subset of tests:
./run-tests.sh 5.4 go test ./link
```

23
vendor/github.com/cilium/ebpf/LICENSE generated vendored Normal file

@ -0,0 +1,23 @@
MIT License
Copyright (c) 2017 Nathan Sweet
Copyright (c) 2018, 2019 Cloudflare
Copyright (c) 2019 Authors of Cilium
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

73
vendor/github.com/cilium/ebpf/Makefile generated vendored Normal file

@ -0,0 +1,73 @@
# The development version of clang is distributed as the 'clang' binary,
# while stable/released versions have a version number attached.
# Pin the default clang to a stable version.
CLANG ?= clang-12
CFLAGS := -target bpf -O2 -g -Wall -Werror $(CFLAGS)
# Obtain an absolute path to the directory of the Makefile.
# Assume the Makefile is in the root of the repository.
REPODIR := $(shell dirname $(realpath $(firstword $(MAKEFILE_LIST))))
UIDGID := $(shell stat -c '%u:%g' ${REPODIR})
IMAGE := $(shell cat ${REPODIR}/testdata/docker/IMAGE)
VERSION := $(shell cat ${REPODIR}/testdata/docker/VERSION)
# clang <8 doesn't tag relocs properly (STT_NOTYPE)
# clang 9 is the first version emitting BTF
TARGETS := \
testdata/loader-clang-7 \
testdata/loader-clang-9 \
testdata/loader-$(CLANG) \
testdata/btf_map_init \
testdata/invalid_map \
testdata/raw_tracepoint \
testdata/invalid_map_static \
testdata/invalid_btf_map_init \
testdata/strings \
testdata/freplace \
testdata/iproute2_map_compat \
internal/btf/testdata/relocs
.PHONY: all clean docker-all docker-shell
.DEFAULT_TARGET = docker-all
# Build all ELF binaries using a Dockerized LLVM toolchain.
docker-all:
docker run --rm --user "${UIDGID}" \
-v "${REPODIR}":/ebpf -w /ebpf --env MAKEFLAGS \
--env CFLAGS="-fdebug-prefix-map=/ebpf=." \
"${IMAGE}:${VERSION}" \
make all
# (debug) Drop the user into a shell inside the Docker container as root.
docker-shell:
docker run --rm -ti \
-v "${REPODIR}":/ebpf -w /ebpf \
"${IMAGE}:${VERSION}"
clean:
-$(RM) testdata/*.elf
-$(RM) internal/btf/testdata/*.elf
all: $(addsuffix -el.elf,$(TARGETS)) $(addsuffix -eb.elf,$(TARGETS))
ln -srf testdata/loader-$(CLANG)-el.elf testdata/loader-el.elf
ln -srf testdata/loader-$(CLANG)-eb.elf testdata/loader-eb.elf
testdata/loader-%-el.elf: testdata/loader.c
$* $(CFLAGS) -mlittle-endian -c $< -o $@
testdata/loader-%-eb.elf: testdata/loader.c
$* $(CFLAGS) -mbig-endian -c $< -o $@
%-el.elf: %.c
$(CLANG) $(CFLAGS) -mlittle-endian -c $< -o $@
%-eb.elf : %.c
$(CLANG) $(CFLAGS) -mbig-endian -c $< -o $@
# Usage: make VMLINUX=/path/to/vmlinux vmlinux-btf
.PHONY: vmlinux-btf
vmlinux-btf: internal/btf/testdata/vmlinux-btf.gz
internal/btf/testdata/vmlinux-btf.gz: $(VMLINUX)
objcopy --dump-section .BTF=/dev/stdout "$<" /dev/null | gzip > "$@"

70
vendor/github.com/cilium/ebpf/README.md generated vendored Normal file

@ -0,0 +1,70 @@
# eBPF
[![PkgGoDev](https://pkg.go.dev/badge/github.com/cilium/ebpf)](https://pkg.go.dev/github.com/cilium/ebpf)
![HoneyGopher](.github/images/cilium-ebpf.png)
eBPF is a pure Go library that provides utilities for loading, compiling, and
debugging eBPF programs. It has minimal external dependencies and is intended to
be used in long running processes.
The library is maintained by [Cloudflare](https://www.cloudflare.com) and
[Cilium](https://www.cilium.io).
See [ebpf.io](https://ebpf.io) for other projects from the eBPF ecosystem.
## Getting Started
A small collection of Go and eBPF programs that serve as examples for building
your own tools can be found under [examples/](examples/).
Contributions are highly encouraged, as they highlight certain use cases of
eBPF and the library, and help shape the future of the project.
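As a first taste of the API, loading a clang-compiled object might look roughly like this (a sketch; `bpf_prog.o` is a placeholder for your own object file):

```go
package main

import (
	"log"

	"github.com/cilium/ebpf"
)

func main() {
	// Parse the ELF emitted by clang into a CollectionSpec.
	spec, err := ebpf.LoadCollectionSpec("bpf_prog.o") // placeholder path
	if err != nil {
		log.Fatal(err)
	}

	// Load all maps and programs from the spec into the kernel.
	coll, err := ebpf.NewCollection(spec)
	if err != nil {
		log.Fatal(err)
	}
	defer coll.Close()

	log.Printf("loaded %d programs", len(coll.Programs))
}
```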
## Getting Help
Please
[join](https://ebpf.io/slack) the
[#ebpf-go](https://cilium.slack.com/messages/ebpf-go) channel on Slack if you
have questions regarding the library.
## Packages
This library includes the following packages:
* [asm](https://pkg.go.dev/github.com/cilium/ebpf/asm) contains a basic
assembler, allowing you to write eBPF assembly instructions directly
within your Go code. (You don't need to use this if you prefer to write your eBPF program in C.)
* [cmd/bpf2go](https://pkg.go.dev/github.com/cilium/ebpf/cmd/bpf2go) allows
compiling and embedding eBPF programs written in C within Go code. As well as
compiling the C code, it auto-generates Go code for loading and manipulating
the eBPF program and map objects.
* [link](https://pkg.go.dev/github.com/cilium/ebpf/link) allows attaching eBPF
to various hooks
* [perf](https://pkg.go.dev/github.com/cilium/ebpf/perf) allows reading from a
`PERF_EVENT_ARRAY`
* [ringbuf](https://pkg.go.dev/github.com/cilium/ebpf/ringbuf) allows reading from a
`BPF_MAP_TYPE_RINGBUF` map
## Requirements
* A version of Go that is [supported by
upstream](https://golang.org/doc/devel/release.html#policy)
* Linux >= 4.9. CI is run against LTS releases.
## Regenerating Testdata
Run `make` in the root of this repository to rebuild testdata in all
subpackages. This requires Docker, as it relies on a standardized build
environment to keep the build output stable.
The toolchain image build files are kept in [testdata/docker/](testdata/docker/).
## License
MIT
### eBPF Gopher
The eBPF honeygopher is based on the Go gopher designed by Renee French.

149
vendor/github.com/cilium/ebpf/asm/alu.go generated vendored Normal file

@ -0,0 +1,149 @@
package asm
//go:generate stringer -output alu_string.go -type=Source,Endianness,ALUOp
// Source of ALU / ALU64 / Branch operations
//
// msb lsb
// +----+-+---+
// |op |S|cls|
// +----+-+---+
type Source uint8
const sourceMask OpCode = 0x08
// Source bitmask
const (
// InvalidSource is returned by getters when invoked
// on non ALU / branch OpCodes.
InvalidSource Source = 0xff
// ImmSource src is from constant
ImmSource Source = 0x00
// RegSource src is from register
RegSource Source = 0x08
)
// The Endianness of a byte swap instruction.
type Endianness uint8
const endianMask = sourceMask
// Endian flags
const (
InvalidEndian Endianness = 0xff
// Convert to little endian
LE Endianness = 0x00
// Convert to big endian
BE Endianness = 0x08
)
// ALUOp are ALU / ALU64 operations
//
// msb lsb
// +----+-+---+
// |OP |s|cls|
// +----+-+---+
type ALUOp uint8
const aluMask OpCode = 0xf0
const (
// InvalidALUOp is returned by getters when invoked
// on non ALU OpCodes
InvalidALUOp ALUOp = 0xff
// Add - addition
Add ALUOp = 0x00
// Sub - subtraction
Sub ALUOp = 0x10
// Mul - multiplication
Mul ALUOp = 0x20
// Div - division
Div ALUOp = 0x30
// Or - bitwise or
Or ALUOp = 0x40
// And - bitwise and
And ALUOp = 0x50
// LSh - bitwise shift left
LSh ALUOp = 0x60
// RSh - bitwise shift right
RSh ALUOp = 0x70
// Neg - negation (dst = -dst)
Neg ALUOp = 0x80
// Mod - modulo
Mod ALUOp = 0x90
// Xor - bitwise xor
Xor ALUOp = 0xa0
// Mov - move value from one place to another
Mov ALUOp = 0xb0
// ArSh - arithmetic shift
ArSh ALUOp = 0xc0
// Swap - endian conversions
Swap ALUOp = 0xd0
)
// HostTo converts from host to another endianness.
func HostTo(endian Endianness, dst Register, size Size) Instruction {
var imm int64
switch size {
case Half:
imm = 16
case Word:
imm = 32
case DWord:
imm = 64
default:
return Instruction{OpCode: InvalidOpCode}
}
return Instruction{
OpCode: OpCode(ALUClass).SetALUOp(Swap).SetSource(Source(endian)),
Dst: dst,
Constant: imm,
}
}
// Op returns the OpCode for an ALU operation with a given source.
func (op ALUOp) Op(source Source) OpCode {
return OpCode(ALU64Class).SetALUOp(op).SetSource(source)
}
// Reg emits `dst (op) src`.
func (op ALUOp) Reg(dst, src Register) Instruction {
return Instruction{
OpCode: op.Op(RegSource),
Dst: dst,
Src: src,
}
}
// Imm emits `dst (op) value`.
func (op ALUOp) Imm(dst Register, value int32) Instruction {
return Instruction{
OpCode: op.Op(ImmSource),
Dst: dst,
Constant: int64(value),
}
}
// Op32 returns the OpCode for a 32-bit ALU operation with a given source.
func (op ALUOp) Op32(source Source) OpCode {
return OpCode(ALUClass).SetALUOp(op).SetSource(source)
}
// Reg32 emits `dst (op) src`, zeroing the upper 32 bit of dst.
func (op ALUOp) Reg32(dst, src Register) Instruction {
return Instruction{
OpCode: op.Op32(RegSource),
Dst: dst,
Src: src,
}
}
// Imm32 emits `dst (op) value`, zeroing the upper 32 bit of dst.
func (op ALUOp) Imm32(dst Register, value int32) Instruction {
return Instruction{
OpCode: op.Op32(ImmSource),
Dst: dst,
Constant: int64(value),
}
}

107
vendor/github.com/cilium/ebpf/asm/alu_string.go generated vendored Normal file

@ -0,0 +1,107 @@
// Code generated by "stringer -output alu_string.go -type=Source,Endianness,ALUOp"; DO NOT EDIT.
package asm
import "strconv"
func _() {
// An "invalid array index" compiler error signifies that the constant values have changed.
// Re-run the stringer command to generate them again.
var x [1]struct{}
_ = x[InvalidSource-255]
_ = x[ImmSource-0]
_ = x[RegSource-8]
}
const (
_Source_name_0 = "ImmSource"
_Source_name_1 = "RegSource"
_Source_name_2 = "InvalidSource"
)
func (i Source) String() string {
switch {
case i == 0:
return _Source_name_0
case i == 8:
return _Source_name_1
case i == 255:
return _Source_name_2
default:
return "Source(" + strconv.FormatInt(int64(i), 10) + ")"
}
}
func _() {
// An "invalid array index" compiler error signifies that the constant values have changed.
// Re-run the stringer command to generate them again.
var x [1]struct{}
_ = x[InvalidEndian-255]
_ = x[LE-0]
_ = x[BE-8]
}
const (
_Endianness_name_0 = "LE"
_Endianness_name_1 = "BE"
_Endianness_name_2 = "InvalidEndian"
)
func (i Endianness) String() string {
switch {
case i == 0:
return _Endianness_name_0
case i == 8:
return _Endianness_name_1
case i == 255:
return _Endianness_name_2
default:
return "Endianness(" + strconv.FormatInt(int64(i), 10) + ")"
}
}
func _() {
// An "invalid array index" compiler error signifies that the constant values have changed.
// Re-run the stringer command to generate them again.
var x [1]struct{}
_ = x[InvalidALUOp-255]
_ = x[Add-0]
_ = x[Sub-16]
_ = x[Mul-32]
_ = x[Div-48]
_ = x[Or-64]
_ = x[And-80]
_ = x[LSh-96]
_ = x[RSh-112]
_ = x[Neg-128]
_ = x[Mod-144]
_ = x[Xor-160]
_ = x[Mov-176]
_ = x[ArSh-192]
_ = x[Swap-208]
}
const _ALUOp_name = "AddSubMulDivOrAndLShRShNegModXorMovArShSwapInvalidALUOp"
var _ALUOp_map = map[ALUOp]string{
0: _ALUOp_name[0:3],
16: _ALUOp_name[3:6],
32: _ALUOp_name[6:9],
48: _ALUOp_name[9:12],
64: _ALUOp_name[12:14],
80: _ALUOp_name[14:17],
96: _ALUOp_name[17:20],
112: _ALUOp_name[20:23],
128: _ALUOp_name[23:26],
144: _ALUOp_name[26:29],
160: _ALUOp_name[29:32],
176: _ALUOp_name[32:35],
192: _ALUOp_name[35:39],
208: _ALUOp_name[39:43],
255: _ALUOp_name[43:55],
}
func (i ALUOp) String() string {
if str, ok := _ALUOp_map[i]; ok {
return str
}
return "ALUOp(" + strconv.FormatInt(int64(i), 10) + ")"
}

2
vendor/github.com/cilium/ebpf/asm/doc.go generated vendored Normal file

@ -0,0 +1,2 @@
// Package asm is an assembler for eBPF bytecode.
package asm

201
vendor/github.com/cilium/ebpf/asm/func.go generated vendored Normal file

@ -0,0 +1,201 @@
package asm
//go:generate stringer -output func_string.go -type=BuiltinFunc
// BuiltinFunc is a built-in eBPF function.
type BuiltinFunc int32
// eBPF built-in functions
//
// You can regenerate this list using the following gawk script:
//
// /FN\(.+\),/ {
// match($1, /\((.+)\)/, r)
// split(r[1], p, "_")
// printf "Fn"
// for (i in p) {
// printf "%s%s", toupper(substr(p[i], 1, 1)), substr(p[i], 2)
// }
// print ""
// }
//
// The script expects include/uapi/linux/bpf.h as its input.
const (
FnUnspec BuiltinFunc = iota
FnMapLookupElem
FnMapUpdateElem
FnMapDeleteElem
FnProbeRead
FnKtimeGetNs
FnTracePrintk
FnGetPrandomU32
FnGetSmpProcessorId
FnSkbStoreBytes
FnL3CsumReplace
FnL4CsumReplace
FnTailCall
FnCloneRedirect
FnGetCurrentPidTgid
FnGetCurrentUidGid
FnGetCurrentComm
FnGetCgroupClassid
FnSkbVlanPush
FnSkbVlanPop
FnSkbGetTunnelKey
FnSkbSetTunnelKey
FnPerfEventRead
FnRedirect
FnGetRouteRealm
FnPerfEventOutput
FnSkbLoadBytes
FnGetStackid
FnCsumDiff
FnSkbGetTunnelOpt
FnSkbSetTunnelOpt
FnSkbChangeProto
FnSkbChangeType
FnSkbUnderCgroup
FnGetHashRecalc
FnGetCurrentTask
FnProbeWriteUser
FnCurrentTaskUnderCgroup
FnSkbChangeTail
FnSkbPullData
FnCsumUpdate
FnSetHashInvalid
FnGetNumaNodeId
FnSkbChangeHead
FnXdpAdjustHead
FnProbeReadStr
FnGetSocketCookie
FnGetSocketUid
FnSetHash
FnSetsockopt
FnSkbAdjustRoom
FnRedirectMap
FnSkRedirectMap
FnSockMapUpdate
FnXdpAdjustMeta
FnPerfEventReadValue
FnPerfProgReadValue
FnGetsockopt
FnOverrideReturn
FnSockOpsCbFlagsSet
FnMsgRedirectMap
FnMsgApplyBytes
FnMsgCorkBytes
FnMsgPullData
FnBind
FnXdpAdjustTail
FnSkbGetXfrmState
FnGetStack
FnSkbLoadBytesRelative
FnFibLookup
FnSockHashUpdate
FnMsgRedirectHash
FnSkRedirectHash
FnLwtPushEncap
FnLwtSeg6StoreBytes
FnLwtSeg6AdjustSrh
FnLwtSeg6Action
FnRcRepeat
FnRcKeydown
FnSkbCgroupId
FnGetCurrentCgroupId
FnGetLocalStorage
FnSkSelectReuseport
FnSkbAncestorCgroupId
FnSkLookupTcp
FnSkLookupUdp
FnSkRelease
FnMapPushElem
FnMapPopElem
FnMapPeekElem
FnMsgPushData
FnMsgPopData
FnRcPointerRel
FnSpinLock
FnSpinUnlock
FnSkFullsock
FnTcpSock
FnSkbEcnSetCe
FnGetListenerSock
FnSkcLookupTcp
FnTcpCheckSyncookie
FnSysctlGetName
FnSysctlGetCurrentValue
FnSysctlGetNewValue
FnSysctlSetNewValue
FnStrtol
FnStrtoul
FnSkStorageGet
FnSkStorageDelete
FnSendSignal
FnTcpGenSyncookie
FnSkbOutput
FnProbeReadUser
FnProbeReadKernel
FnProbeReadUserStr
FnProbeReadKernelStr
FnTcpSendAck
FnSendSignalThread
FnJiffies64
FnReadBranchRecords
FnGetNsCurrentPidTgid
FnXdpOutput
FnGetNetnsCookie
FnGetCurrentAncestorCgroupId
FnSkAssign
FnKtimeGetBootNs
FnSeqPrintf
FnSeqWrite
FnSkCgroupId
FnSkAncestorCgroupId
FnRingbufOutput
FnRingbufReserve
FnRingbufSubmit
FnRingbufDiscard
FnRingbufQuery
FnCsumLevel
FnSkcToTcp6Sock
FnSkcToTcpSock
FnSkcToTcpTimewaitSock
FnSkcToTcpRequestSock
FnSkcToUdp6Sock
FnGetTaskStack
FnLoadHdrOpt
FnStoreHdrOpt
FnReserveHdrOpt
FnInodeStorageGet
FnInodeStorageDelete
FnDPath
FnCopyFromUser
FnSnprintfBtf
FnSeqPrintfBtf
FnSkbCgroupClassid
FnRedirectNeigh
FnPerCpuPtr
FnThisCpuPtr
FnRedirectPeer
FnTaskStorageGet
FnTaskStorageDelete
FnGetCurrentTaskBtf
FnBprmOptsSet
FnKtimeGetCoarseNs
FnImaInodeHash
FnSockFromFile
FnCheckMtu
FnForEachMapElem
FnSnprintf
FnSysBpf
FnBtfFindByNameKind
FnSysClose
)
// Call emits a function call.
func (fn BuiltinFunc) Call() Instruction {
return Instruction{
OpCode: OpCode(JumpClass).SetJumpOp(Call),
Constant: int64(fn),
}
}
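An illustrative sketch (the choice of helper is arbitrary): emitting the call instruction for a kernel helper; the helper's return value lands in r0.

package example

import "github.com/cilium/ebpf/asm"

// pidTgid is the instruction that calls the bpf_get_current_pid_tgid helper.
var pidTgid = asm.FnGetCurrentPidTgid.Call()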

191
vendor/github.com/cilium/ebpf/asm/func_string.go generated vendored Normal file

@ -0,0 +1,191 @@
// Code generated by "stringer -output func_string.go -type=BuiltinFunc"; DO NOT EDIT.
package asm
import "strconv"
func _() {
// An "invalid array index" compiler error signifies that the constant values have changed.
// Re-run the stringer command to generate them again.
var x [1]struct{}
_ = x[FnUnspec-0]
_ = x[FnMapLookupElem-1]
_ = x[FnMapUpdateElem-2]
_ = x[FnMapDeleteElem-3]
_ = x[FnProbeRead-4]
_ = x[FnKtimeGetNs-5]
_ = x[FnTracePrintk-6]
_ = x[FnGetPrandomU32-7]
_ = x[FnGetSmpProcessorId-8]
_ = x[FnSkbStoreBytes-9]
_ = x[FnL3CsumReplace-10]
_ = x[FnL4CsumReplace-11]
_ = x[FnTailCall-12]
_ = x[FnCloneRedirect-13]
_ = x[FnGetCurrentPidTgid-14]
_ = x[FnGetCurrentUidGid-15]
_ = x[FnGetCurrentComm-16]
_ = x[FnGetCgroupClassid-17]
_ = x[FnSkbVlanPush-18]
_ = x[FnSkbVlanPop-19]
_ = x[FnSkbGetTunnelKey-20]
_ = x[FnSkbSetTunnelKey-21]
_ = x[FnPerfEventRead-22]
_ = x[FnRedirect-23]
_ = x[FnGetRouteRealm-24]
_ = x[FnPerfEventOutput-25]
_ = x[FnSkbLoadBytes-26]
_ = x[FnGetStackid-27]
_ = x[FnCsumDiff-28]
_ = x[FnSkbGetTunnelOpt-29]
_ = x[FnSkbSetTunnelOpt-30]
_ = x[FnSkbChangeProto-31]
_ = x[FnSkbChangeType-32]
_ = x[FnSkbUnderCgroup-33]
_ = x[FnGetHashRecalc-34]
_ = x[FnGetCurrentTask-35]
_ = x[FnProbeWriteUser-36]
_ = x[FnCurrentTaskUnderCgroup-37]
_ = x[FnSkbChangeTail-38]
_ = x[FnSkbPullData-39]
_ = x[FnCsumUpdate-40]
_ = x[FnSetHashInvalid-41]
_ = x[FnGetNumaNodeId-42]
_ = x[FnSkbChangeHead-43]
_ = x[FnXdpAdjustHead-44]
_ = x[FnProbeReadStr-45]
_ = x[FnGetSocketCookie-46]
_ = x[FnGetSocketUid-47]
_ = x[FnSetHash-48]
_ = x[FnSetsockopt-49]
_ = x[FnSkbAdjustRoom-50]
_ = x[FnRedirectMap-51]
_ = x[FnSkRedirectMap-52]
_ = x[FnSockMapUpdate-53]
_ = x[FnXdpAdjustMeta-54]
_ = x[FnPerfEventReadValue-55]
_ = x[FnPerfProgReadValue-56]
_ = x[FnGetsockopt-57]
_ = x[FnOverrideReturn-58]
_ = x[FnSockOpsCbFlagsSet-59]
_ = x[FnMsgRedirectMap-60]
_ = x[FnMsgApplyBytes-61]
_ = x[FnMsgCorkBytes-62]
_ = x[FnMsgPullData-63]
_ = x[FnBind-64]
_ = x[FnXdpAdjustTail-65]
_ = x[FnSkbGetXfrmState-66]
_ = x[FnGetStack-67]
_ = x[FnSkbLoadBytesRelative-68]
_ = x[FnFibLookup-69]
_ = x[FnSockHashUpdate-70]
_ = x[FnMsgRedirectHash-71]
_ = x[FnSkRedirectHash-72]
_ = x[FnLwtPushEncap-73]
_ = x[FnLwtSeg6StoreBytes-74]
_ = x[FnLwtSeg6AdjustSrh-75]
_ = x[FnLwtSeg6Action-76]
_ = x[FnRcRepeat-77]
_ = x[FnRcKeydown-78]
_ = x[FnSkbCgroupId-79]
_ = x[FnGetCurrentCgroupId-80]
_ = x[FnGetLocalStorage-81]
_ = x[FnSkSelectReuseport-82]
_ = x[FnSkbAncestorCgroupId-83]
_ = x[FnSkLookupTcp-84]
_ = x[FnSkLookupUdp-85]
_ = x[FnSkRelease-86]
_ = x[FnMapPushElem-87]
_ = x[FnMapPopElem-88]
_ = x[FnMapPeekElem-89]
_ = x[FnMsgPushData-90]
_ = x[FnMsgPopData-91]
_ = x[FnRcPointerRel-92]
_ = x[FnSpinLock-93]
_ = x[FnSpinUnlock-94]
_ = x[FnSkFullsock-95]
_ = x[FnTcpSock-96]
_ = x[FnSkbEcnSetCe-97]
_ = x[FnGetListenerSock-98]
_ = x[FnSkcLookupTcp-99]
_ = x[FnTcpCheckSyncookie-100]
_ = x[FnSysctlGetName-101]
_ = x[FnSysctlGetCurrentValue-102]
_ = x[FnSysctlGetNewValue-103]
_ = x[FnSysctlSetNewValue-104]
_ = x[FnStrtol-105]
_ = x[FnStrtoul-106]
_ = x[FnSkStorageGet-107]
_ = x[FnSkStorageDelete-108]
_ = x[FnSendSignal-109]
_ = x[FnTcpGenSyncookie-110]
_ = x[FnSkbOutput-111]
_ = x[FnProbeReadUser-112]
_ = x[FnProbeReadKernel-113]
_ = x[FnProbeReadUserStr-114]
_ = x[FnProbeReadKernelStr-115]
_ = x[FnTcpSendAck-116]
_ = x[FnSendSignalThread-117]
_ = x[FnJiffies64-118]
_ = x[FnReadBranchRecords-119]
_ = x[FnGetNsCurrentPidTgid-120]
_ = x[FnXdpOutput-121]
_ = x[FnGetNetnsCookie-122]
_ = x[FnGetCurrentAncestorCgroupId-123]
_ = x[FnSkAssign-124]
_ = x[FnKtimeGetBootNs-125]
_ = x[FnSeqPrintf-126]
_ = x[FnSeqWrite-127]
_ = x[FnSkCgroupId-128]
_ = x[FnSkAncestorCgroupId-129]
_ = x[FnRingbufOutput-130]
_ = x[FnRingbufReserve-131]
_ = x[FnRingbufSubmit-132]
_ = x[FnRingbufDiscard-133]
_ = x[FnRingbufQuery-134]
_ = x[FnCsumLevel-135]
_ = x[FnSkcToTcp6Sock-136]
_ = x[FnSkcToTcpSock-137]
_ = x[FnSkcToTcpTimewaitSock-138]
_ = x[FnSkcToTcpRequestSock-139]
_ = x[FnSkcToUdp6Sock-140]
_ = x[FnGetTaskStack-141]
_ = x[FnLoadHdrOpt-142]
_ = x[FnStoreHdrOpt-143]
_ = x[FnReserveHdrOpt-144]
_ = x[FnInodeStorageGet-145]
_ = x[FnInodeStorageDelete-146]
_ = x[FnDPath-147]
_ = x[FnCopyFromUser-148]
_ = x[FnSnprintfBtf-149]
_ = x[FnSeqPrintfBtf-150]
_ = x[FnSkbCgroupClassid-151]
_ = x[FnRedirectNeigh-152]
_ = x[FnPerCpuPtr-153]
_ = x[FnThisCpuPtr-154]
_ = x[FnRedirectPeer-155]
_ = x[FnTaskStorageGet-156]
_ = x[FnTaskStorageDelete-157]
_ = x[FnGetCurrentTaskBtf-158]
_ = x[FnBprmOptsSet-159]
_ = x[FnKtimeGetCoarseNs-160]
_ = x[FnImaInodeHash-161]
_ = x[FnSockFromFile-162]
_ = x[FnCheckMtu-163]
_ = x[FnForEachMapElem-164]
_ = x[FnSnprintf-165]
_ = x[FnSysBpf-166]
_ = x[FnBtfFindByNameKind-167]
_ = x[FnSysClose-168]
}
const _BuiltinFunc_name = "FnUnspecFnMapLookupElemFnMapUpdateElemFnMapDeleteElemFnProbeReadFnKtimeGetNsFnTracePrintkFnGetPrandomU32FnGetSmpProcessorIdFnSkbStoreBytesFnL3CsumReplaceFnL4CsumReplaceFnTailCallFnCloneRedirectFnGetCurrentPidTgidFnGetCurrentUidGidFnGetCurrentCommFnGetCgroupClassidFnSkbVlanPushFnSkbVlanPopFnSkbGetTunnelKeyFnSkbSetTunnelKeyFnPerfEventReadFnRedirectFnGetRouteRealmFnPerfEventOutputFnSkbLoadBytesFnGetStackidFnCsumDiffFnSkbGetTunnelOptFnSkbSetTunnelOptFnSkbChangeProtoFnSkbChangeTypeFnSkbUnderCgroupFnGetHashRecalcFnGetCurrentTaskFnProbeWriteUserFnCurrentTaskUnderCgroupFnSkbChangeTailFnSkbPullDataFnCsumUpdateFnSetHashInvalidFnGetNumaNodeIdFnSkbChangeHeadFnXdpAdjustHeadFnProbeReadStrFnGetSocketCookieFnGetSocketUidFnSetHashFnSetsockoptFnSkbAdjustRoomFnRedirectMapFnSkRedirectMapFnSockMapUpdateFnXdpAdjustMetaFnPerfEventReadValueFnPerfProgReadValueFnGetsockoptFnOverrideReturnFnSockOpsCbFlagsSetFnMsgRedirectMapFnMsgApplyBytesFnMsgCorkBytesFnMsgPullDataFnBindFnXdpAdjustTailFnSkbGetXfrmStateFnGetStackFnSkbLoadBytesRelativeFnFibLookupFnSockHashUpdateFnMsgRedirectHashFnSkRedirectHashFnLwtPushEncapFnLwtSeg6StoreBytesFnLwtSeg6AdjustSrhFnLwtSeg6ActionFnRcRepeatFnRcKeydownFnSkbCgroupIdFnGetCurrentCgroupIdFnGetLocalStorageFnSkSelectReuseportFnSkbAncestorCgroupIdFnSkLookupTcpFnSkLookupUdpFnSkReleaseFnMapPushElemFnMapPopElemFnMapPeekElemFnMsgPushDataFnMsgPopDataFnRcPointerRelFnSpinLockFnSpinUnlockFnSkFullsockFnTcpSockFnSkbEcnSetCeFnGetListenerSockFnSkcLookupTcpFnTcpCheckSyncookieFnSysctlGetNameFnSysctlGetCurrentValueFnSysctlGetNewValueFnSysctlSetNewValueFnStrtolFnStrtoulFnSkStorageGetFnSkStorageDeleteFnSendSignalFnTcpGenSyncookieFnSkbOutputFnProbeReadUserFnProbeReadKernelFnProbeReadUserStrFnProbeReadKernelStrFnTcpSendAckFnSendSignalThreadFnJiffies64FnReadBranchRecordsFnGetNsCurrentPidTgidFnXdpOutputFnGetNetnsCookieFnGetCurrentAncestorCgroupIdFnSkAssignFnKtimeGetBootNsFnSeqPrintfFnSeqWriteFnSkCgroupIdFnSkAncestorCgroupIdFnRingbufOutputFnRingbufReserveFnRingbufSubmitFnRingbufDiscardFnRingbufQueryFnCsumLevelFnSkcToTcp6SockFnSkcToTcpSockFnSkcToTcpTimewaitSockFnSkcToTcpRequestSockFnSkcToUdp6SockFnGetTaskStackFnLoadHdrOptFnStoreHdrOptFnReserveHdrOptFnInodeStorageGetFnInodeStorageDeleteFnDPathFnCopyFromUserFnSnprintfBtfFnSeqPrintfBtfFnSkbCgroupClassidFnRedirectNeighFnPerCpuPtrFnThisCpuPtrFnRedirectPeerFnTaskStorageGetFnTaskStorageDeleteFnGetCurrentTaskBtfFnBprmOptsSetFnKtimeGetCoarseNsFnImaInodeHashFnSockFromFileFnCheckMtuFnForEachMapElemFnSnprintfFnSysBpfFnBtfFindByNameKindFnSysClose"
var _BuiltinFunc_index = [...]uint16{0, 8, 23, 38, 53, 64, 76, 89, 104, 123, 138, 153, 168, 178, 193, 212, 230, 246, 264, 277, 289, 306, 323, 338, 348, 363, 380, 394, 406, 416, 433, 450, 466, 481, 497, 512, 528, 544, 568, 583, 596, 608, 624, 639, 654, 669, 683, 700, 714, 723, 735, 750, 763, 778, 793, 808, 828, 847, 859, 875, 894, 910, 925, 939, 952, 958, 973, 990, 1000, 1022, 1033, 1049, 1066, 1082, 1096, 1115, 1133, 1148, 1158, 1169, 1182, 1202, 1219, 1238, 1259, 1272, 1285, 1296, 1309, 1321, 1334, 1347, 1359, 1373, 1383, 1395, 1407, 1416, 1429, 1446, 1460, 1479, 1494, 1517, 1536, 1555, 1563, 1572, 1586, 1603, 1615, 1632, 1643, 1658, 1675, 1693, 1713, 1725, 1743, 1754, 1773, 1794, 1805, 1821, 1849, 1859, 1875, 1886, 1896, 1908, 1928, 1943, 1959, 1974, 1990, 2004, 2015, 2030, 2044, 2066, 2087, 2102, 2116, 2128, 2141, 2156, 2173, 2193, 2200, 2214, 2227, 2241, 2259, 2274, 2285, 2297, 2311, 2327, 2346, 2365, 2378, 2396, 2410, 2424, 2434, 2450, 2460, 2468, 2487, 2497}
func (i BuiltinFunc) String() string {
if i < 0 || i >= BuiltinFunc(len(_BuiltinFunc_index)-1) {
return "BuiltinFunc(" + strconv.FormatInt(int64(i), 10) + ")"
}
return _BuiltinFunc_name[_BuiltinFunc_index[i]:_BuiltinFunc_index[i+1]]
}

511
vendor/github.com/cilium/ebpf/asm/instruction.go generated vendored Normal file

@ -0,0 +1,511 @@
package asm
import (
"crypto/sha1"
"encoding/binary"
"encoding/hex"
"errors"
"fmt"
"io"
"math"
"strings"
"github.com/cilium/ebpf/internal/unix"
)
// InstructionSize is the size of a BPF instruction in bytes
const InstructionSize = 8
// RawInstructionOffset is an offset in units of raw BPF instructions.
type RawInstructionOffset uint64
// Bytes returns the offset of an instruction in bytes.
func (rio RawInstructionOffset) Bytes() uint64 {
return uint64(rio) * InstructionSize
}
// Instruction is a single eBPF instruction.
type Instruction struct {
OpCode OpCode
Dst Register
Src Register
Offset int16
Constant int64
Reference string
Symbol string
}
// Sym attaches a symbol name to the instruction, so that jumps and calls can reference it.
func (ins Instruction) Sym(name string) Instruction {
ins.Symbol = name
return ins
}
// Unmarshal decodes a BPF instruction.
func (ins *Instruction) Unmarshal(r io.Reader, bo binary.ByteOrder) (uint64, error) {
var bi bpfInstruction
err := binary.Read(r, bo, &bi)
if err != nil {
return 0, err
}
ins.OpCode = bi.OpCode
ins.Offset = bi.Offset
ins.Constant = int64(bi.Constant)
ins.Dst, ins.Src, err = bi.Registers.Unmarshal(bo)
if err != nil {
return 0, fmt.Errorf("can't unmarshal registers: %s", err)
}
if !bi.OpCode.IsDWordLoad() {
return InstructionSize, nil
}
var bi2 bpfInstruction
if err := binary.Read(r, bo, &bi2); err != nil {
// No Wrap, to avoid io.EOF clash
return 0, errors.New("64bit immediate is missing second half")
}
if bi2.OpCode != 0 || bi2.Offset != 0 || bi2.Registers != 0 {
return 0, errors.New("64bit immediate has non-zero fields")
}
ins.Constant = int64(uint64(uint32(bi2.Constant))<<32 | uint64(uint32(bi.Constant)))
return 2 * InstructionSize, nil
}
// Marshal encodes a BPF instruction.
func (ins Instruction) Marshal(w io.Writer, bo binary.ByteOrder) (uint64, error) {
if ins.OpCode == InvalidOpCode {
return 0, errors.New("invalid opcode")
}
isDWordLoad := ins.OpCode.IsDWordLoad()
cons := int32(ins.Constant)
if isDWordLoad {
// Encode least significant 32bit first for 64bit operations.
cons = int32(uint32(ins.Constant))
}
regs, err := newBPFRegisters(ins.Dst, ins.Src, bo)
if err != nil {
return 0, fmt.Errorf("can't marshal registers: %s", err)
}
bpfi := bpfInstruction{
ins.OpCode,
regs,
ins.Offset,
cons,
}
if err := binary.Write(w, bo, &bpfi); err != nil {
return 0, err
}
if !isDWordLoad {
return InstructionSize, nil
}
bpfi = bpfInstruction{
Constant: int32(ins.Constant >> 32),
}
if err := binary.Write(w, bo, &bpfi); err != nil {
return 0, err
}
return 2 * InstructionSize, nil
}
// RewriteMapPtr changes an instruction to use a new map fd.
//
// Returns an error if the instruction doesn't load a map.
func (ins *Instruction) RewriteMapPtr(fd int) error {
if !ins.OpCode.IsDWordLoad() {
return fmt.Errorf("%s is not a 64 bit load", ins.OpCode)
}
if ins.Src != PseudoMapFD && ins.Src != PseudoMapValue {
return errors.New("not a load from a map")
}
// Preserve the offset value for direct map loads.
offset := uint64(ins.Constant) & (math.MaxUint32 << 32)
rawFd := uint64(uint32(fd))
ins.Constant = int64(offset | rawFd)
return nil
}
// MapPtr returns the map fd for this instruction.
//
// The result is undefined if the instruction is not a load from a map,
// see IsLoadFromMap.
func (ins *Instruction) MapPtr() int {
return int(int32(uint64(ins.Constant) & math.MaxUint32))
}
// RewriteMapOffset changes the offset of a direct load from a map.
//
// Returns an error if the instruction is not a direct load.
func (ins *Instruction) RewriteMapOffset(offset uint32) error {
if !ins.OpCode.IsDWordLoad() {
return fmt.Errorf("%s is not a 64 bit load", ins.OpCode)
}
if ins.Src != PseudoMapValue {
return errors.New("not a direct load from a map")
}
fd := uint64(ins.Constant) & math.MaxUint32
ins.Constant = int64(uint64(offset)<<32 | fd)
return nil
}
func (ins *Instruction) mapOffset() uint32 {
return uint32(uint64(ins.Constant) >> 32)
}
// IsLoadFromMap returns true if the instruction loads from a map.
//
// This covers both loading the map pointer and direct map value loads.
func (ins *Instruction) IsLoadFromMap() bool {
return ins.OpCode == LoadImmOp(DWord) && (ins.Src == PseudoMapFD || ins.Src == PseudoMapValue)
}
// IsFunctionCall returns true if the instruction calls another BPF function.
//
// This is not the same thing as a BPF helper call.
func (ins *Instruction) IsFunctionCall() bool {
return ins.OpCode.JumpOp() == Call && ins.Src == PseudoCall
}
// IsBuiltinCall returns true if the instruction is a built-in call, i.e. BPF helper call.
func (ins *Instruction) IsBuiltinCall() bool {
return ins.OpCode.JumpOp() == Call && ins.Src == R0 && ins.Dst == R0
}
// IsConstantLoad returns true if the instruction loads a constant of the
// given size.
func (ins *Instruction) IsConstantLoad(size Size) bool {
return ins.OpCode == LoadImmOp(size) && ins.Src == R0 && ins.Offset == 0
}
// Format implements fmt.Formatter.
func (ins Instruction) Format(f fmt.State, c rune) {
if c != 'v' {
fmt.Fprintf(f, "{UNRECOGNIZED: %c}", c)
return
}
op := ins.OpCode
if op == InvalidOpCode {
fmt.Fprint(f, "INVALID")
return
}
// Omit trailing space for Exit
if op.JumpOp() == Exit {
fmt.Fprint(f, op)
return
}
if ins.IsLoadFromMap() {
fd := ins.MapPtr()
switch ins.Src {
case PseudoMapFD:
fmt.Fprintf(f, "LoadMapPtr dst: %s fd: %d", ins.Dst, fd)
case PseudoMapValue:
fmt.Fprintf(f, "LoadMapValue dst: %s, fd: %d off: %d", ins.Dst, fd, ins.mapOffset())
}
goto ref
}
fmt.Fprintf(f, "%v ", op)
switch cls := op.Class(); cls {
case LdClass, LdXClass, StClass, StXClass:
switch op.Mode() {
case ImmMode:
fmt.Fprintf(f, "dst: %s imm: %d", ins.Dst, ins.Constant)
case AbsMode:
fmt.Fprintf(f, "imm: %d", ins.Constant)
case IndMode:
fmt.Fprintf(f, "dst: %s src: %s imm: %d", ins.Dst, ins.Src, ins.Constant)
case MemMode:
fmt.Fprintf(f, "dst: %s src: %s off: %d imm: %d", ins.Dst, ins.Src, ins.Offset, ins.Constant)
case XAddMode:
fmt.Fprintf(f, "dst: %s src: %s", ins.Dst, ins.Src)
}
case ALU64Class, ALUClass:
fmt.Fprintf(f, "dst: %s ", ins.Dst)
if op.ALUOp() == Swap || op.Source() == ImmSource {
fmt.Fprintf(f, "imm: %d", ins.Constant)
} else {
fmt.Fprintf(f, "src: %s", ins.Src)
}
case JumpClass:
switch jop := op.JumpOp(); jop {
case Call:
if ins.Src == PseudoCall {
// bpf-to-bpf call
fmt.Fprint(f, ins.Constant)
} else {
fmt.Fprint(f, BuiltinFunc(ins.Constant))
}
default:
fmt.Fprintf(f, "dst: %s off: %d ", ins.Dst, ins.Offset)
if op.Source() == ImmSource {
fmt.Fprintf(f, "imm: %d", ins.Constant)
} else {
fmt.Fprintf(f, "src: %s", ins.Src)
}
}
}
ref:
if ins.Reference != "" {
fmt.Fprintf(f, " <%s>", ins.Reference)
}
}
// Instructions is an eBPF program.
type Instructions []Instruction
func (insns Instructions) String() string {
return fmt.Sprint(insns)
}
// RewriteMapPtr rewrites all loads of a specific map pointer to a new fd.
//
// Returns an error if the symbol isn't used, see IsUnreferencedSymbol.
func (insns Instructions) RewriteMapPtr(symbol string, fd int) error {
if symbol == "" {
return errors.New("empty symbol")
}
found := false
for i := range insns {
ins := &insns[i]
if ins.Reference != symbol {
continue
}
if err := ins.RewriteMapPtr(fd); err != nil {
return err
}
found = true
}
if !found {
return &unreferencedSymbolError{symbol}
}
return nil
}
// SymbolOffsets returns the set of symbols and their offset in
// the instructions.
func (insns Instructions) SymbolOffsets() (map[string]int, error) {
offsets := make(map[string]int)
for i, ins := range insns {
if ins.Symbol == "" {
continue
}
if _, ok := offsets[ins.Symbol]; ok {
return nil, fmt.Errorf("duplicate symbol %s", ins.Symbol)
}
offsets[ins.Symbol] = i
}
return offsets, nil
}
// ReferenceOffsets returns the set of references and their offset in
// the instructions.
func (insns Instructions) ReferenceOffsets() map[string][]int {
offsets := make(map[string][]int)
for i, ins := range insns {
if ins.Reference == "" {
continue
}
offsets[ins.Reference] = append(offsets[ins.Reference], i)
}
return offsets
}
// Format implements fmt.Formatter.
//
// You can control indentation of symbols by
// specifying a width. Setting a precision controls the indentation of
// instructions.
// The default character is a tab, which can be overridden by specifying
// the ' ' space flag.
func (insns Instructions) Format(f fmt.State, c rune) {
if c != 's' && c != 'v' {
fmt.Fprintf(f, "{UNKNOWN FORMAT '%c'}", c)
return
}
// Precision is better in this case, because it allows
// specifying 0 padding easily.
padding, ok := f.Precision()
if !ok {
padding = 1
}
indent := strings.Repeat("\t", padding)
if f.Flag(' ') {
indent = strings.Repeat(" ", padding)
}
symPadding, ok := f.Width()
if !ok {
symPadding = padding - 1
}
if symPadding < 0 {
symPadding = 0
}
symIndent := strings.Repeat("\t", symPadding)
if f.Flag(' ') {
symIndent = strings.Repeat(" ", symPadding)
}
// Guess how many digits we need at most, by assuming that all instructions
// are double wide.
highestOffset := len(insns) * 2
offsetWidth := int(math.Ceil(math.Log10(float64(highestOffset))))
iter := insns.Iterate()
for iter.Next() {
if iter.Ins.Symbol != "" {
fmt.Fprintf(f, "%s%s:\n", symIndent, iter.Ins.Symbol)
}
fmt.Fprintf(f, "%s%*d: %v\n", indent, offsetWidth, iter.Offset, iter.Ins)
}
}
// Marshal encodes a BPF program into the kernel format.
func (insns Instructions) Marshal(w io.Writer, bo binary.ByteOrder) error {
for i, ins := range insns {
_, err := ins.Marshal(w, bo)
if err != nil {
return fmt.Errorf("instruction %d: %w", i, err)
}
}
return nil
}
// Tag calculates the kernel tag for a series of instructions.
//
// It mirrors bpf_prog_calc_tag in the kernel and so can be compared
// to ProgramInfo.Tag to figure out whether a loaded program matches
// certain instructions.
func (insns Instructions) Tag(bo binary.ByteOrder) (string, error) {
h := sha1.New()
for i, ins := range insns {
if ins.IsLoadFromMap() {
ins.Constant = 0
}
_, err := ins.Marshal(h, bo)
if err != nil {
return "", fmt.Errorf("instruction %d: %w", i, err)
}
}
return hex.EncodeToString(h.Sum(nil)[:unix.BPF_TAG_SIZE]), nil
}
// Iterate allows iterating a BPF program while keeping track of
// various offsets.
//
// Modifying the instruction slice will lead to undefined behaviour.
func (insns Instructions) Iterate() *InstructionIterator {
return &InstructionIterator{insns: insns}
}
// InstructionIterator iterates over a BPF program.
type InstructionIterator struct {
insns Instructions
// The instruction in question.
Ins *Instruction
// The index of the instruction in the original instruction slice.
Index int
// The offset of the instruction in raw BPF instructions. This accounts
// for double-wide instructions.
Offset RawInstructionOffset
}
// Next returns true as long as there are any instructions remaining.
func (iter *InstructionIterator) Next() bool {
if len(iter.insns) == 0 {
return false
}
if iter.Ins != nil {
iter.Index++
iter.Offset += RawInstructionOffset(iter.Ins.OpCode.rawInstructions())
}
iter.Ins = &iter.insns[0]
iter.insns = iter.insns[1:]
return true
}
type bpfInstruction struct {
OpCode OpCode
Registers bpfRegisters
Offset int16
Constant int32
}
type bpfRegisters uint8
func newBPFRegisters(dst, src Register, bo binary.ByteOrder) (bpfRegisters, error) {
switch bo {
case binary.LittleEndian:
return bpfRegisters((src << 4) | (dst & 0xF)), nil
case binary.BigEndian:
return bpfRegisters((dst << 4) | (src & 0xF)), nil
default:
return 0, fmt.Errorf("unrecognized ByteOrder %T", bo)
}
}
func (r bpfRegisters) Unmarshal(bo binary.ByteOrder) (dst, src Register, err error) {
switch bo {
case binary.LittleEndian:
return Register(r & 0xF), Register(r >> 4), nil
case binary.BigEndian:
return Register(r >> 4), Register(r & 0xf), nil
default:
return 0, 0, fmt.Errorf("unrecognized ByteOrder %T", bo)
}
}
type unreferencedSymbolError struct {
symbol string
}
func (use *unreferencedSymbolError) Error() string {
return fmt.Sprintf("unreferenced symbol %s", use.symbol)
}
// IsUnreferencedSymbol returns true if err was caused by
// an unreferenced symbol.
func IsUnreferencedSymbol(err error) bool {
_, ok := err.(*unreferencedSymbolError)
return ok
}

109
vendor/github.com/cilium/ebpf/asm/jump.go generated vendored Normal file

@ -0,0 +1,109 @@
package asm
//go:generate stringer -output jump_string.go -type=JumpOp
// JumpOp affects control flow.
//
// msb lsb
// +----+-+---+
// |OP |s|cls|
// +----+-+---+
type JumpOp uint8
const jumpMask OpCode = aluMask
const (
// InvalidJumpOp is returned by getters when invoked
// on non branch OpCodes
InvalidJumpOp JumpOp = 0xff
// Ja jumps by offset unconditionally
Ja JumpOp = 0x00
// JEq jumps by offset if r == imm
JEq JumpOp = 0x10
// JGT jumps by offset if r > imm
JGT JumpOp = 0x20
// JGE jumps by offset if r >= imm
JGE JumpOp = 0x30
// JSet jumps by offset if r & imm
JSet JumpOp = 0x40
// JNE jumps by offset if r != imm
JNE JumpOp = 0x50
// JSGT jumps by offset if signed r > signed imm
JSGT JumpOp = 0x60
// JSGE jumps by offset if signed r >= signed imm
JSGE JumpOp = 0x70
// Call builtin or user defined function from imm
Call JumpOp = 0x80
// Exit ends execution, with value in r0
Exit JumpOp = 0x90
// JLT jumps by offset if r < imm
JLT JumpOp = 0xa0
// JLE jumps by offset if r <= imm
JLE JumpOp = 0xb0
// JSLT jumps by offset if signed r < signed imm
JSLT JumpOp = 0xc0
// JSLE jumps by offset if signed r <= signed imm
JSLE JumpOp = 0xd0
)
// Return emits an exit instruction.
//
// Requires a return value in R0.
func Return() Instruction {
return Instruction{
OpCode: OpCode(JumpClass).SetJumpOp(Exit),
}
}
// Op returns the OpCode for a given jump source.
func (op JumpOp) Op(source Source) OpCode {
return OpCode(JumpClass).SetJumpOp(op).SetSource(source)
}
// Imm compares dst to value, and adjusts PC by offset if the condition is fulfilled.
func (op JumpOp) Imm(dst Register, value int32, label string) Instruction {
if op == Exit || op == Call || op == Ja {
return Instruction{OpCode: InvalidOpCode}
}
return Instruction{
OpCode: OpCode(JumpClass).SetJumpOp(op).SetSource(ImmSource),
Dst: dst,
Offset: -1,
Constant: int64(value),
Reference: label,
}
}
// Reg compares dst to src, and adjusts PC by offset if the condition is fulfilled.
func (op JumpOp) Reg(dst, src Register, label string) Instruction {
if op == Exit || op == Call || op == Ja {
return Instruction{OpCode: InvalidOpCode}
}
return Instruction{
OpCode: OpCode(JumpClass).SetJumpOp(op).SetSource(RegSource),
Dst: dst,
Src: src,
Offset: -1,
Reference: label,
}
}
// Label adjusts PC to the address of the label.
func (op JumpOp) Label(label string) Instruction {
if op == Call {
return Instruction{
OpCode: OpCode(JumpClass).SetJumpOp(Call),
Src: PseudoCall,
Constant: -1,
Reference: label,
}
}
return Instruction{
OpCode: OpCode(JumpClass).SetJumpOp(op),
Offset: -1,
Reference: label,
}
}
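A small sketch of how these constructors combine with labelled instructions (register choice and label name are illustrative):

package example

import "github.com/cilium/ebpf/asm"

// branchExample builds a body that leaves 1 in r0 when r1 == 0, and 0 otherwise.
func branchExample() asm.Instructions {
	return asm.Instructions{
		asm.LoadImm(asm.R0, 1, asm.DWord), // r0 = 1
		asm.JEq.Imm(asm.R1, 0, "exit"),    // if r1 == 0 goto exit
		asm.Mov.Imm(asm.R0, 0),            // r0 = 0
		asm.Return().Sym("exit"),          // exit: return r0
	}
}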

53
vendor/github.com/cilium/ebpf/asm/jump_string.go generated vendored Normal file

@ -0,0 +1,53 @@
// Code generated by "stringer -output jump_string.go -type=JumpOp"; DO NOT EDIT.
package asm
import "strconv"
func _() {
// An "invalid array index" compiler error signifies that the constant values have changed.
// Re-run the stringer command to generate them again.
var x [1]struct{}
_ = x[InvalidJumpOp-255]
_ = x[Ja-0]
_ = x[JEq-16]
_ = x[JGT-32]
_ = x[JGE-48]
_ = x[JSet-64]
_ = x[JNE-80]
_ = x[JSGT-96]
_ = x[JSGE-112]
_ = x[Call-128]
_ = x[Exit-144]
_ = x[JLT-160]
_ = x[JLE-176]
_ = x[JSLT-192]
_ = x[JSLE-208]
}
const _JumpOp_name = "JaJEqJGTJGEJSetJNEJSGTJSGECallExitJLTJLEJSLTJSLEInvalidJumpOp"
var _JumpOp_map = map[JumpOp]string{
0: _JumpOp_name[0:2],
16: _JumpOp_name[2:5],
32: _JumpOp_name[5:8],
48: _JumpOp_name[8:11],
64: _JumpOp_name[11:15],
80: _JumpOp_name[15:18],
96: _JumpOp_name[18:22],
112: _JumpOp_name[22:26],
128: _JumpOp_name[26:30],
144: _JumpOp_name[30:34],
160: _JumpOp_name[34:37],
176: _JumpOp_name[37:40],
192: _JumpOp_name[40:44],
208: _JumpOp_name[44:48],
255: _JumpOp_name[48:61],
}
func (i JumpOp) String() string {
if str, ok := _JumpOp_map[i]; ok {
return str
}
return "JumpOp(" + strconv.FormatInt(int64(i), 10) + ")"
}

204
vendor/github.com/cilium/ebpf/asm/load_store.go generated vendored Normal file

@ -0,0 +1,204 @@
package asm
//go:generate stringer -output load_store_string.go -type=Mode,Size
// Mode for load and store operations
//
// msb lsb
// +---+--+---+
// |MDE|sz|cls|
// +---+--+---+
type Mode uint8
const modeMask OpCode = 0xe0
const (
// InvalidMode is returned by getters when invoked
// on non load / store OpCodes
InvalidMode Mode = 0xff
// ImmMode - immediate value
ImmMode Mode = 0x00
// AbsMode - immediate value + offset
AbsMode Mode = 0x20
// IndMode - indirect (imm+src)
IndMode Mode = 0x40
// MemMode - load from memory
MemMode Mode = 0x60
// XAddMode - add atomically across processors.
XAddMode Mode = 0xc0
)
// Size of load and store operations
//
// msb lsb
// +---+--+---+
// |mde|SZ|cls|
// +---+--+---+
type Size uint8
const sizeMask OpCode = 0x18
const (
// InvalidSize is returned by getters when invoked
// on non load / store OpCodes
InvalidSize Size = 0xff
// DWord - double word; 64 bits
DWord Size = 0x18
// Word - word; 32 bits
Word Size = 0x00
// Half - half-word; 16 bits
Half Size = 0x08
// Byte - byte; 8 bits
Byte Size = 0x10
)
// Sizeof returns the size in bytes.
func (s Size) Sizeof() int {
switch s {
case DWord:
return 8
case Word:
return 4
case Half:
return 2
case Byte:
return 1
default:
return -1
}
}
// LoadMemOp returns the OpCode to load a value of given size from memory.
func LoadMemOp(size Size) OpCode {
return OpCode(LdXClass).SetMode(MemMode).SetSize(size)
}
// LoadMem emits `dst = *(size *)(src + offset)`.
func LoadMem(dst, src Register, offset int16, size Size) Instruction {
return Instruction{
OpCode: LoadMemOp(size),
Dst: dst,
Src: src,
Offset: offset,
}
}
// LoadImmOp returns the OpCode to load an immediate of given size.
//
// As of kernel 4.20, only DWord size is accepted.
func LoadImmOp(size Size) OpCode {
return OpCode(LdClass).SetMode(ImmMode).SetSize(size)
}
// LoadImm emits `dst = (size)value`.
//
// As of kernel 4.20, only DWord size is accepted.
func LoadImm(dst Register, value int64, size Size) Instruction {
return Instruction{
OpCode: LoadImmOp(size),
Dst: dst,
Constant: value,
}
}
// LoadMapPtr stores a pointer to a map in dst.
func LoadMapPtr(dst Register, fd int) Instruction {
if fd < 0 {
return Instruction{OpCode: InvalidOpCode}
}
return Instruction{
OpCode: LoadImmOp(DWord),
Dst: dst,
Src: PseudoMapFD,
Constant: int64(uint32(fd)),
}
}
// LoadMapValue stores a pointer to the value at a certain offset of a map.
func LoadMapValue(dst Register, fd int, offset uint32) Instruction {
if fd < 0 {
return Instruction{OpCode: InvalidOpCode}
}
fdAndOffset := (uint64(offset) << 32) | uint64(uint32(fd))
return Instruction{
OpCode: LoadImmOp(DWord),
Dst: dst,
Src: PseudoMapValue,
Constant: int64(fdAndOffset),
}
}
// LoadIndOp returns the OpCode for loading a value of given size from an sk_buff.
func LoadIndOp(size Size) OpCode {
return OpCode(LdClass).SetMode(IndMode).SetSize(size)
}
// LoadInd emits `dst = ntoh(*(size *)(((sk_buff *)R6)->data + src + offset))`.
func LoadInd(dst, src Register, offset int32, size Size) Instruction {
return Instruction{
OpCode: LoadIndOp(size),
Dst: dst,
Src: src,
Constant: int64(offset),
}
}
// LoadAbsOp returns the OpCode for loading a value of given size from an sk_buff.
func LoadAbsOp(size Size) OpCode {
return OpCode(LdClass).SetMode(AbsMode).SetSize(size)
}
// LoadAbs emits `r0 = ntoh(*(size *)(((sk_buff *)R6)->data + offset))`.
func LoadAbs(offset int32, size Size) Instruction {
return Instruction{
OpCode: LoadAbsOp(size),
Dst: R0,
Constant: int64(offset),
}
}
// StoreMemOp returns the OpCode for storing a register of given size in memory.
func StoreMemOp(size Size) OpCode {
return OpCode(StXClass).SetMode(MemMode).SetSize(size)
}
// StoreMem emits `*(size *)(dst + offset) = src`
func StoreMem(dst Register, offset int16, src Register, size Size) Instruction {
return Instruction{
OpCode: StoreMemOp(size),
Dst: dst,
Src: src,
Offset: offset,
}
}
// StoreImmOp returns the OpCode for storing an immediate of given size in memory.
func StoreImmOp(size Size) OpCode {
return OpCode(StClass).SetMode(MemMode).SetSize(size)
}
// StoreImm emits `*(size *)(dst + offset) = value`.
func StoreImm(dst Register, offset int16, value int64, size Size) Instruction {
return Instruction{
OpCode: StoreImmOp(size),
Dst: dst,
Offset: offset,
Constant: value,
}
}
// StoreXAddOp returns the OpCode to atomically add a register to a value in memory.
func StoreXAddOp(size Size) OpCode {
return OpCode(StXClass).SetMode(XAddMode).SetSize(size)
}
// StoreXAdd atomically adds src to *dst.
func StoreXAdd(dst, src Register, size Size) Instruction {
return Instruction{
OpCode: StoreXAddOp(size),
Dst: dst,
Src: src,
}
}
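A short sketch assembling a handful of memory accesses with the load/store helpers defined above; it uses the register constants from register.go later in this commit, and the offsets are arbitrary.

```go
package main

import (
	"fmt"

	"github.com/cilium/ebpf/asm"
)

func main() {
	insns := asm.Instructions{
		// r0 = *(u32 *)(r1 + 0)
		asm.LoadMem(asm.R0, asm.R1, 0, asm.Word),
		// *(u64 *)(rfp - 8) = r0
		asm.StoreMem(asm.RFP, -8, asm.R0, asm.DWord),
		// *(u64 *)(rfp - 16) = 42
		asm.StoreImm(asm.RFP, -16, 42, asm.DWord),
	}
	fmt.Println(insns)
}
```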

80
vendor/github.com/cilium/ebpf/asm/load_store_string.go generated vendored Normal file

@ -0,0 +1,80 @@
// Code generated by "stringer -output load_store_string.go -type=Mode,Size"; DO NOT EDIT.
package asm
import "strconv"
func _() {
// An "invalid array index" compiler error signifies that the constant values have changed.
// Re-run the stringer command to generate them again.
var x [1]struct{}
_ = x[InvalidMode-255]
_ = x[ImmMode-0]
_ = x[AbsMode-32]
_ = x[IndMode-64]
_ = x[MemMode-96]
_ = x[XAddMode-192]
}
const (
_Mode_name_0 = "ImmMode"
_Mode_name_1 = "AbsMode"
_Mode_name_2 = "IndMode"
_Mode_name_3 = "MemMode"
_Mode_name_4 = "XAddMode"
_Mode_name_5 = "InvalidMode"
)
func (i Mode) String() string {
switch {
case i == 0:
return _Mode_name_0
case i == 32:
return _Mode_name_1
case i == 64:
return _Mode_name_2
case i == 96:
return _Mode_name_3
case i == 192:
return _Mode_name_4
case i == 255:
return _Mode_name_5
default:
return "Mode(" + strconv.FormatInt(int64(i), 10) + ")"
}
}
func _() {
// An "invalid array index" compiler error signifies that the constant values have changed.
// Re-run the stringer command to generate them again.
var x [1]struct{}
_ = x[InvalidSize-255]
_ = x[DWord-24]
_ = x[Word-0]
_ = x[Half-8]
_ = x[Byte-16]
}
const (
_Size_name_0 = "Word"
_Size_name_1 = "Half"
_Size_name_2 = "Byte"
_Size_name_3 = "DWord"
_Size_name_4 = "InvalidSize"
)
func (i Size) String() string {
switch {
case i == 0:
return _Size_name_0
case i == 8:
return _Size_name_1
case i == 16:
return _Size_name_2
case i == 24:
return _Size_name_3
case i == 255:
return _Size_name_4
default:
return "Size(" + strconv.FormatInt(int64(i), 10) + ")"
}
}

237
vendor/github.com/cilium/ebpf/asm/opcode.go generated vendored Normal file

@ -0,0 +1,237 @@
package asm
import (
"fmt"
"strings"
)
//go:generate stringer -output opcode_string.go -type=Class
type encoding int
const (
unknownEncoding encoding = iota
loadOrStore
jumpOrALU
)
// Class of operations
//
// msb lsb
// +---+--+---+
// | ?? |CLS|
// +---+--+---+
type Class uint8
const classMask OpCode = 0x07
const (
// LdClass loads immediate values into registers
LdClass Class = 0x00
// LdXClass loads values from memory into registers
LdXClass Class = 0x01
// StClass stores immediate values to memory
StClass Class = 0x02
// StXClass stores register values to memory
StXClass Class = 0x03
// ALUClass arithmetic operators
ALUClass Class = 0x04
// JumpClass jump operators
JumpClass Class = 0x05
// ALU64Class arithmetic in 64 bit mode
ALU64Class Class = 0x07
)
func (cls Class) encoding() encoding {
switch cls {
case LdClass, LdXClass, StClass, StXClass:
return loadOrStore
case ALU64Class, ALUClass, JumpClass:
return jumpOrALU
default:
return unknownEncoding
}
}
// OpCode is a packed eBPF opcode.
//
// Its encoding is defined by a Class value:
//
// msb lsb
// +----+-+---+
// | ???? |CLS|
// +----+-+---+
type OpCode uint8
// InvalidOpCode is returned by setters on OpCode
const InvalidOpCode OpCode = 0xff
// rawInstructions returns the number of BPF instructions required
// to encode this opcode.
func (op OpCode) rawInstructions() int {
if op.IsDWordLoad() {
return 2
}
return 1
}
func (op OpCode) IsDWordLoad() bool {
return op == LoadImmOp(DWord)
}
// Class returns the class of operation.
func (op OpCode) Class() Class {
return Class(op & classMask)
}
// Mode returns the mode for load and store operations.
func (op OpCode) Mode() Mode {
if op.Class().encoding() != loadOrStore {
return InvalidMode
}
return Mode(op & modeMask)
}
// Size returns the size for load and store operations.
func (op OpCode) Size() Size {
if op.Class().encoding() != loadOrStore {
return InvalidSize
}
return Size(op & sizeMask)
}
// Source returns the source for branch and ALU operations.
func (op OpCode) Source() Source {
if op.Class().encoding() != jumpOrALU || op.ALUOp() == Swap {
return InvalidSource
}
return Source(op & sourceMask)
}
// ALUOp returns the ALUOp.
func (op OpCode) ALUOp() ALUOp {
if op.Class().encoding() != jumpOrALU {
return InvalidALUOp
}
return ALUOp(op & aluMask)
}
// Endianness returns the Endianness for a byte swap instruction.
func (op OpCode) Endianness() Endianness {
if op.ALUOp() != Swap {
return InvalidEndian
}
return Endianness(op & endianMask)
}
// JumpOp returns the JumpOp.
func (op OpCode) JumpOp() JumpOp {
if op.Class().encoding() != jumpOrALU {
return InvalidJumpOp
}
return JumpOp(op & jumpMask)
}
// SetMode sets the mode on load and store operations.
//
// Returns InvalidOpCode if op is of the wrong class.
func (op OpCode) SetMode(mode Mode) OpCode {
if op.Class().encoding() != loadOrStore || !valid(OpCode(mode), modeMask) {
return InvalidOpCode
}
return (op & ^modeMask) | OpCode(mode)
}
// SetSize sets the size on load and store operations.
//
// Returns InvalidOpCode if op is of the wrong class.
func (op OpCode) SetSize(size Size) OpCode {
if op.Class().encoding() != loadOrStore || !valid(OpCode(size), sizeMask) {
return InvalidOpCode
}
return (op & ^sizeMask) | OpCode(size)
}
// SetSource sets the source on jump and ALU operations.
//
// Returns InvalidOpCode if op is of the wrong class.
func (op OpCode) SetSource(source Source) OpCode {
if op.Class().encoding() != jumpOrALU || !valid(OpCode(source), sourceMask) {
return InvalidOpCode
}
return (op & ^sourceMask) | OpCode(source)
}
// SetALUOp sets the ALUOp on ALU operations.
//
// Returns InvalidOpCode if op is of the wrong class.
func (op OpCode) SetALUOp(alu ALUOp) OpCode {
class := op.Class()
if (class != ALUClass && class != ALU64Class) || !valid(OpCode(alu), aluMask) {
return InvalidOpCode
}
return (op & ^aluMask) | OpCode(alu)
}
// SetJumpOp sets the JumpOp on jump operations.
//
// Returns InvalidOpCode if op is of the wrong class.
func (op OpCode) SetJumpOp(jump JumpOp) OpCode {
if op.Class() != JumpClass || !valid(OpCode(jump), jumpMask) {
return InvalidOpCode
}
return (op & ^jumpMask) | OpCode(jump)
}
func (op OpCode) String() string {
var f strings.Builder
switch class := op.Class(); class {
case LdClass, LdXClass, StClass, StXClass:
f.WriteString(strings.TrimSuffix(class.String(), "Class"))
mode := op.Mode()
f.WriteString(strings.TrimSuffix(mode.String(), "Mode"))
switch op.Size() {
case DWord:
f.WriteString("DW")
case Word:
f.WriteString("W")
case Half:
f.WriteString("H")
case Byte:
f.WriteString("B")
}
case ALU64Class, ALUClass:
f.WriteString(op.ALUOp().String())
if op.ALUOp() == Swap {
// Width for Endian is controlled by Constant
f.WriteString(op.Endianness().String())
} else {
if class == ALUClass {
f.WriteString("32")
}
f.WriteString(strings.TrimSuffix(op.Source().String(), "Source"))
}
case JumpClass:
f.WriteString(op.JumpOp().String())
if jop := op.JumpOp(); jop != Exit && jop != Call {
f.WriteString(strings.TrimSuffix(op.Source().String(), "Source"))
}
default:
fmt.Fprintf(&f, "OpCode(%#x)", uint8(op))
}
return f.String()
}
// valid returns true if all bits in value are covered by mask.
func valid(value, mask OpCode) bool {
return value & ^mask == 0
}
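A small sketch of the OpCode setters and String method defined above; LoadMemOp from load_store.go builds the same opcode in a single call.

```go
package main

import (
	"fmt"

	"github.com/cilium/ebpf/asm"
)

func main() {
	op := asm.OpCode(asm.LdXClass).SetMode(asm.MemMode).SetSize(asm.Word)
	fmt.Println(op)                            // LdXMemW
	fmt.Println(op.Class())                    // LdXClass
	fmt.Println(op.Size().Sizeof())            // 4
	fmt.Println(op == asm.LoadMemOp(asm.Word)) // true
}
```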

38
vendor/github.com/cilium/ebpf/asm/opcode_string.go generated vendored Normal file

@ -0,0 +1,38 @@
// Code generated by "stringer -output opcode_string.go -type=Class"; DO NOT EDIT.
package asm
import "strconv"
func _() {
// An "invalid array index" compiler error signifies that the constant values have changed.
// Re-run the stringer command to generate them again.
var x [1]struct{}
_ = x[LdClass-0]
_ = x[LdXClass-1]
_ = x[StClass-2]
_ = x[StXClass-3]
_ = x[ALUClass-4]
_ = x[JumpClass-5]
_ = x[ALU64Class-7]
}
const (
_Class_name_0 = "LdClassLdXClassStClassStXClassALUClassJumpClass"
_Class_name_1 = "ALU64Class"
)
var (
_Class_index_0 = [...]uint8{0, 7, 15, 22, 30, 38, 47}
)
func (i Class) String() string {
switch {
case 0 <= i && i <= 5:
return _Class_name_0[_Class_index_0[i]:_Class_index_0[i+1]]
case i == 7:
return _Class_name_1
default:
return "Class(" + strconv.FormatInt(int64(i), 10) + ")"
}
}

49
vendor/github.com/cilium/ebpf/asm/register.go generated vendored Normal file

@ -0,0 +1,49 @@
package asm
import (
"fmt"
)
// Register is the source or destination of most operations.
type Register uint8
// R0 contains return values.
const R0 Register = 0
// Registers for function arguments.
const (
R1 Register = R0 + 1 + iota
R2
R3
R4
R5
)
// Callee saved registers preserved by function calls.
const (
R6 Register = R5 + 1 + iota
R7
R8
R9
)
// Read-only frame pointer to access stack.
const (
R10 Register = R9 + 1
RFP = R10
)
// Pseudo registers used by 64bit loads and jumps
const (
PseudoMapFD = R1 // BPF_PSEUDO_MAP_FD
PseudoMapValue = R2 // BPF_PSEUDO_MAP_VALUE
PseudoCall = R1 // BPF_PSEUDO_CALL
)
func (r Register) String() string {
v := uint8(r)
if v == 10 {
return "rfp"
}
return fmt.Sprintf("r%d", v)
}
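For completeness, a trivial sketch of the Register constants and String method above:

```go
package main

import (
	"fmt"

	"github.com/cilium/ebpf/asm"
)

func main() {
	fmt.Println(asm.R1)             // r1
	fmt.Println(asm.R10)            // rfp
	fmt.Println(asm.RFP == asm.R10) // true
}
```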

65
vendor/github.com/cilium/ebpf/attachtype_string.go generated vendored Normal file

@ -0,0 +1,65 @@
// Code generated by "stringer -type AttachType -trimprefix Attach"; DO NOT EDIT.
package ebpf
import "strconv"
func _() {
// An "invalid array index" compiler error signifies that the constant values have changed.
// Re-run the stringer command to generate them again.
var x [1]struct{}
_ = x[AttachNone-0]
_ = x[AttachCGroupInetIngress-0]
_ = x[AttachCGroupInetEgress-1]
_ = x[AttachCGroupInetSockCreate-2]
_ = x[AttachCGroupSockOps-3]
_ = x[AttachSkSKBStreamParser-4]
_ = x[AttachSkSKBStreamVerdict-5]
_ = x[AttachCGroupDevice-6]
_ = x[AttachSkMsgVerdict-7]
_ = x[AttachCGroupInet4Bind-8]
_ = x[AttachCGroupInet6Bind-9]
_ = x[AttachCGroupInet4Connect-10]
_ = x[AttachCGroupInet6Connect-11]
_ = x[AttachCGroupInet4PostBind-12]
_ = x[AttachCGroupInet6PostBind-13]
_ = x[AttachCGroupUDP4Sendmsg-14]
_ = x[AttachCGroupUDP6Sendmsg-15]
_ = x[AttachLircMode2-16]
_ = x[AttachFlowDissector-17]
_ = x[AttachCGroupSysctl-18]
_ = x[AttachCGroupUDP4Recvmsg-19]
_ = x[AttachCGroupUDP6Recvmsg-20]
_ = x[AttachCGroupGetsockopt-21]
_ = x[AttachCGroupSetsockopt-22]
_ = x[AttachTraceRawTp-23]
_ = x[AttachTraceFEntry-24]
_ = x[AttachTraceFExit-25]
_ = x[AttachModifyReturn-26]
_ = x[AttachLSMMac-27]
_ = x[AttachTraceIter-28]
_ = x[AttachCgroupInet4GetPeername-29]
_ = x[AttachCgroupInet6GetPeername-30]
_ = x[AttachCgroupInet4GetSockname-31]
_ = x[AttachCgroupInet6GetSockname-32]
_ = x[AttachXDPDevMap-33]
_ = x[AttachCgroupInetSockRelease-34]
_ = x[AttachXDPCPUMap-35]
_ = x[AttachSkLookup-36]
_ = x[AttachXDP-37]
_ = x[AttachSkSKBVerdict-38]
_ = x[AttachSkReuseportSelect-39]
_ = x[AttachSkReuseportSelectOrMigrate-40]
_ = x[AttachPerfEvent-41]
}
const _AttachType_name = "NoneCGroupInetEgressCGroupInetSockCreateCGroupSockOpsSkSKBStreamParserSkSKBStreamVerdictCGroupDeviceSkMsgVerdictCGroupInet4BindCGroupInet6BindCGroupInet4ConnectCGroupInet6ConnectCGroupInet4PostBindCGroupInet6PostBindCGroupUDP4SendmsgCGroupUDP6SendmsgLircMode2FlowDissectorCGroupSysctlCGroupUDP4RecvmsgCGroupUDP6RecvmsgCGroupGetsockoptCGroupSetsockoptTraceRawTpTraceFEntryTraceFExitModifyReturnLSMMacTraceIterCgroupInet4GetPeernameCgroupInet6GetPeernameCgroupInet4GetSocknameCgroupInet6GetSocknameXDPDevMapCgroupInetSockReleaseXDPCPUMapSkLookupXDPSkSKBVerdictSkReuseportSelectSkReuseportSelectOrMigratePerfEvent"
var _AttachType_index = [...]uint16{0, 4, 20, 40, 53, 70, 88, 100, 112, 127, 142, 160, 178, 197, 216, 233, 250, 259, 272, 284, 301, 318, 334, 350, 360, 371, 381, 393, 399, 408, 430, 452, 474, 496, 505, 526, 535, 543, 546, 558, 575, 601, 610}
func (i AttachType) String() string {
if i >= AttachType(len(_AttachType_index)-1) {
return "AttachType(" + strconv.FormatInt(int64(i), 10) + ")"
}
return _AttachType_name[_AttachType_index[i]:_AttachType_index[i+1]]
}

668
vendor/github.com/cilium/ebpf/collection.go generated vendored Normal file

@ -0,0 +1,668 @@
package ebpf
import (
"encoding/binary"
"errors"
"fmt"
"io"
"math"
"reflect"
"strings"
"github.com/cilium/ebpf/asm"
"github.com/cilium/ebpf/internal"
"github.com/cilium/ebpf/internal/btf"
)
// CollectionOptions control loading a collection into the kernel.
//
// Maps and Programs are passed to NewMapWithOptions and NewProgramWithOptions.
type CollectionOptions struct {
Maps MapOptions
Programs ProgramOptions
}
// CollectionSpec describes a collection.
type CollectionSpec struct {
Maps map[string]*MapSpec
Programs map[string]*ProgramSpec
// ByteOrder specifies whether the ELF was compiled for
// big-endian or little-endian architectures.
ByteOrder binary.ByteOrder
}
// Copy returns a recursive copy of the spec.
func (cs *CollectionSpec) Copy() *CollectionSpec {
if cs == nil {
return nil
}
cpy := CollectionSpec{
Maps: make(map[string]*MapSpec, len(cs.Maps)),
Programs: make(map[string]*ProgramSpec, len(cs.Programs)),
ByteOrder: cs.ByteOrder,
}
for name, spec := range cs.Maps {
cpy.Maps[name] = spec.Copy()
}
for name, spec := range cs.Programs {
cpy.Programs[name] = spec.Copy()
}
return &cpy
}
// RewriteMaps replaces all references to specific maps.
//
// Use this function to use pre-existing maps instead of creating new ones
// when calling NewCollection. Any named maps are removed from CollectionSpec.Maps.
//
// Returns an error if a named map isn't used in at least one program.
func (cs *CollectionSpec) RewriteMaps(maps map[string]*Map) error {
for symbol, m := range maps {
// have we seen a program that uses this symbol / map
seen := false
fd := m.FD()
for progName, progSpec := range cs.Programs {
err := progSpec.Instructions.RewriteMapPtr(symbol, fd)
switch {
case err == nil:
seen = true
case asm.IsUnreferencedSymbol(err):
// Not all programs need to use the map
default:
return fmt.Errorf("program %s: %w", progName, err)
}
}
if !seen {
return fmt.Errorf("map %s not referenced by any programs", symbol)
}
// Prevent NewCollection from creating rewritten maps
delete(cs.Maps, symbol)
}
return nil
}
// RewriteConstants replaces the value of multiple constants.
//
// The constant must be defined like so in the C program:
//
// volatile const type foobar;
// volatile const type foobar = default;
//
// Replacement values must be of the same length as the C sizeof(type).
// If necessary, they are marshalled according to the same rules as
// map values.
//
// From Linux 5.5 the verifier will use constants to eliminate dead code.
//
// Returns an error if a constant doesn't exist.
func (cs *CollectionSpec) RewriteConstants(consts map[string]interface{}) error {
rodata := cs.Maps[".rodata"]
if rodata == nil {
return errors.New("missing .rodata section")
}
if rodata.BTF == nil {
return errors.New(".rodata section has no BTF")
}
if n := len(rodata.Contents); n != 1 {
return fmt.Errorf("expected one key in .rodata, found %d", n)
}
kv := rodata.Contents[0]
value, ok := kv.Value.([]byte)
if !ok {
return fmt.Errorf("first value in .rodata is %T not []byte", kv.Value)
}
buf := make([]byte, len(value))
copy(buf, value)
err := patchValue(buf, rodata.BTF.Value, consts)
if err != nil {
return err
}
rodata.Contents[0] = MapKV{kv.Key, buf}
return nil
}
// Assign the contents of a CollectionSpec to a struct.
//
// This function is a shortcut to manually checking the presence
// of maps and programs in a CollectionSpec. Consider using bpf2go
// if this sounds useful.
//
// 'to' must be a pointer to a struct. A field of the
// struct is updated with values from Programs or Maps if it
// has an `ebpf` tag and its type is *ProgramSpec or *MapSpec.
// The tag's value specifies the name of the program or map as
// found in the CollectionSpec.
//
// struct {
// Foo *ebpf.ProgramSpec `ebpf:"xdp_foo"`
// Bar *ebpf.MapSpec `ebpf:"bar_map"`
// Ignored int
// }
//
// Returns an error if any of the eBPF objects can't be found, or
// if the same MapSpec or ProgramSpec is assigned multiple times.
func (cs *CollectionSpec) Assign(to interface{}) error {
// Assign() only supports assigning ProgramSpecs and MapSpecs,
// so doesn't load any resources into the kernel.
getValue := func(typ reflect.Type, name string) (interface{}, error) {
switch typ {
case reflect.TypeOf((*ProgramSpec)(nil)):
if p := cs.Programs[name]; p != nil {
return p, nil
}
return nil, fmt.Errorf("missing program %q", name)
case reflect.TypeOf((*MapSpec)(nil)):
if m := cs.Maps[name]; m != nil {
return m, nil
}
return nil, fmt.Errorf("missing map %q", name)
default:
return nil, fmt.Errorf("unsupported type %s", typ)
}
}
return assignValues(to, getValue)
}
// LoadAndAssign loads Maps and Programs into the kernel and assigns them
// to a struct.
//
// This function is a shortcut to manually checking the presence
// of maps and programs in a CollectionSpec. Consider using bpf2go
// if this sounds useful.
//
// 'to' must be a pointer to a struct. A field of the struct is updated with
// a Program or Map if it has an `ebpf` tag and its type is *Program or *Map.
// The tag's value specifies the name of the program or map as found in the
// CollectionSpec. Before updating the struct, the requested objects and their
// dependent resources are loaded into the kernel and populated with values if
// specified.
//
// struct {
// Foo *ebpf.Program `ebpf:"xdp_foo"`
// Bar *ebpf.Map `ebpf:"bar_map"`
// Ignored int
// }
//
// opts may be nil.
//
// Returns an error if any of the fields can't be found, or
// if the same Map or Program is assigned multiple times.
func (cs *CollectionSpec) LoadAndAssign(to interface{}, opts *CollectionOptions) error {
loader := newCollectionLoader(cs, opts)
defer loader.cleanup()
// Support assigning Programs and Maps, lazy-loading the required objects.
assignedMaps := make(map[string]bool)
getValue := func(typ reflect.Type, name string) (interface{}, error) {
switch typ {
case reflect.TypeOf((*Program)(nil)):
return loader.loadProgram(name)
case reflect.TypeOf((*Map)(nil)):
assignedMaps[name] = true
return loader.loadMap(name)
default:
return nil, fmt.Errorf("unsupported type %s", typ)
}
}
// Load the Maps and Programs requested by the annotated struct.
if err := assignValues(to, getValue); err != nil {
return err
}
// Populate the requested maps. Has a chance of lazy-loading other dependent maps.
if err := loader.populateMaps(); err != nil {
return err
}
// Evaluate the loader's objects after all (lazy)loading has taken place.
for n, m := range loader.maps {
switch m.typ {
case ProgramArray:
// Require all lazy-loaded ProgramArrays to be assigned to the given object.
// Without any references, they will be closed on the first GC and all tail
// calls into them will miss.
if !assignedMaps[n] {
return fmt.Errorf("ProgramArray %s must be assigned to prevent missed tail calls", n)
}
}
}
loader.finalize()
return nil
}
// Collection is a collection of Programs and Maps associated
// with their symbols
type Collection struct {
Programs map[string]*Program
Maps map[string]*Map
}
// NewCollection creates a Collection from a specification.
func NewCollection(spec *CollectionSpec) (*Collection, error) {
return NewCollectionWithOptions(spec, CollectionOptions{})
}
// NewCollectionWithOptions creates a Collection from a specification.
func NewCollectionWithOptions(spec *CollectionSpec, opts CollectionOptions) (*Collection, error) {
loader := newCollectionLoader(spec, &opts)
defer loader.cleanup()
// Create maps first, as their fds need to be linked into programs.
for mapName := range spec.Maps {
if _, err := loader.loadMap(mapName); err != nil {
return nil, err
}
}
for progName := range spec.Programs {
if _, err := loader.loadProgram(progName); err != nil {
return nil, err
}
}
// Maps can contain Program and Map stubs, so populate them after
// all Maps and Programs have been successfully loaded.
if err := loader.populateMaps(); err != nil {
return nil, err
}
maps, progs := loader.maps, loader.programs
loader.finalize()
return &Collection{
progs,
maps,
}, nil
}
type handleCache struct {
btfHandles map[*btf.Spec]*btf.Handle
btfSpecs map[io.ReaderAt]*btf.Spec
}
func newHandleCache() *handleCache {
return &handleCache{
btfHandles: make(map[*btf.Spec]*btf.Handle),
btfSpecs: make(map[io.ReaderAt]*btf.Spec),
}
}
func (hc handleCache) btfHandle(spec *btf.Spec) (*btf.Handle, error) {
if hc.btfHandles[spec] != nil {
return hc.btfHandles[spec], nil
}
handle, err := btf.NewHandle(spec)
if err != nil {
return nil, err
}
hc.btfHandles[spec] = handle
return handle, nil
}
func (hc handleCache) btfSpec(rd io.ReaderAt) (*btf.Spec, error) {
if hc.btfSpecs[rd] != nil {
return hc.btfSpecs[rd], nil
}
spec, err := btf.LoadSpecFromReader(rd)
if err != nil {
return nil, err
}
hc.btfSpecs[rd] = spec
return spec, nil
}
func (hc handleCache) close() {
for _, handle := range hc.btfHandles {
handle.Close()
}
}
type collectionLoader struct {
coll *CollectionSpec
opts *CollectionOptions
maps map[string]*Map
programs map[string]*Program
handles *handleCache
}
func newCollectionLoader(coll *CollectionSpec, opts *CollectionOptions) *collectionLoader {
if opts == nil {
opts = &CollectionOptions{}
}
return &collectionLoader{
coll,
opts,
make(map[string]*Map),
make(map[string]*Program),
newHandleCache(),
}
}
// finalize should be called when all the collectionLoader's resources
// have been successfully loaded into the kernel and populated with values.
func (cl *collectionLoader) finalize() {
cl.maps, cl.programs = nil, nil
}
// cleanup cleans up all resources left over in the collectionLoader.
// Call finalize() when Map and Program creation/population is successful
// to prevent them from getting closed.
func (cl *collectionLoader) cleanup() {
cl.handles.close()
for _, m := range cl.maps {
m.Close()
}
for _, p := range cl.programs {
p.Close()
}
}
func (cl *collectionLoader) loadMap(mapName string) (*Map, error) {
if m := cl.maps[mapName]; m != nil {
return m, nil
}
mapSpec := cl.coll.Maps[mapName]
if mapSpec == nil {
return nil, fmt.Errorf("missing map %s", mapName)
}
m, err := newMapWithOptions(mapSpec, cl.opts.Maps, cl.handles)
if err != nil {
return nil, fmt.Errorf("map %s: %w", mapName, err)
}
cl.maps[mapName] = m
return m, nil
}
func (cl *collectionLoader) loadProgram(progName string) (*Program, error) {
if prog := cl.programs[progName]; prog != nil {
return prog, nil
}
progSpec := cl.coll.Programs[progName]
if progSpec == nil {
return nil, fmt.Errorf("unknown program %s", progName)
}
progSpec = progSpec.Copy()
// Rewrite any reference to a valid map.
for i := range progSpec.Instructions {
ins := &progSpec.Instructions[i]
if !ins.IsLoadFromMap() || ins.Reference == "" {
continue
}
if uint32(ins.Constant) != math.MaxUint32 {
// Don't overwrite maps already rewritten, users can
// rewrite programs in the spec themselves
continue
}
m, err := cl.loadMap(ins.Reference)
if err != nil {
return nil, fmt.Errorf("program %s: %w", progName, err)
}
fd := m.FD()
if fd < 0 {
return nil, fmt.Errorf("map %s: %w", ins.Reference, internal.ErrClosedFd)
}
if err := ins.RewriteMapPtr(m.FD()); err != nil {
return nil, fmt.Errorf("program %s: map %s: %w", progName, ins.Reference, err)
}
}
prog, err := newProgramWithOptions(progSpec, cl.opts.Programs, cl.handles)
if err != nil {
return nil, fmt.Errorf("program %s: %w", progName, err)
}
cl.programs[progName] = prog
return prog, nil
}
func (cl *collectionLoader) populateMaps() error {
for mapName, m := range cl.maps {
mapSpec, ok := cl.coll.Maps[mapName]
if !ok {
return fmt.Errorf("missing map spec %s", mapName)
}
mapSpec = mapSpec.Copy()
// Replace any object stubs with loaded objects.
for i, kv := range mapSpec.Contents {
switch v := kv.Value.(type) {
case programStub:
// loadProgram is idempotent and could return an existing Program.
prog, err := cl.loadProgram(string(v))
if err != nil {
return fmt.Errorf("loading program %s, for map %s: %w", v, mapName, err)
}
mapSpec.Contents[i] = MapKV{kv.Key, prog}
case mapStub:
// loadMap is idempotent and could return an existing Map.
innerMap, err := cl.loadMap(string(v))
if err != nil {
return fmt.Errorf("loading inner map %s, for map %s: %w", v, mapName, err)
}
mapSpec.Contents[i] = MapKV{kv.Key, innerMap}
}
}
// Populate and freeze the map if specified.
if err := m.finalize(mapSpec); err != nil {
return fmt.Errorf("populating map %s: %w", mapName, err)
}
}
return nil
}
// LoadCollection parses an object file and converts it to a collection.
func LoadCollection(file string) (*Collection, error) {
spec, err := LoadCollectionSpec(file)
if err != nil {
return nil, err
}
return NewCollection(spec)
}
// Close frees all maps and programs associated with the collection.
//
// The collection mustn't be used afterwards.
func (coll *Collection) Close() {
for _, prog := range coll.Programs {
prog.Close()
}
for _, m := range coll.Maps {
m.Close()
}
}
// DetachMap removes the named map from the Collection.
//
// This means that a later call to Close() will not affect this map.
//
// Returns nil if no map of that name exists.
func (coll *Collection) DetachMap(name string) *Map {
m := coll.Maps[name]
delete(coll.Maps, name)
return m
}
// DetachProgram removes the named program from the Collection.
//
// This means that a later call to Close() will not affect this program.
//
// Returns nil if no program of that name exists.
func (coll *Collection) DetachProgram(name string) *Program {
p := coll.Programs[name]
delete(coll.Programs, name)
return p
}
// structField represents a struct field containing the ebpf struct tag.
type structField struct {
reflect.StructField
value reflect.Value
}
// ebpfFields extracts field names tagged with 'ebpf' from a struct type.
// Keep track of visited types to avoid infinite recursion.
func ebpfFields(structVal reflect.Value, visited map[reflect.Type]bool) ([]structField, error) {
if visited == nil {
visited = make(map[reflect.Type]bool)
}
structType := structVal.Type()
if structType.Kind() != reflect.Struct {
return nil, fmt.Errorf("%s is not a struct", structType)
}
if visited[structType] {
return nil, fmt.Errorf("recursion on type %s", structType)
}
fields := make([]structField, 0, structType.NumField())
for i := 0; i < structType.NumField(); i++ {
field := structField{structType.Field(i), structVal.Field(i)}
// If the field is tagged, gather it and move on.
name := field.Tag.Get("ebpf")
if name != "" {
fields = append(fields, field)
continue
}
// If the field does not have an ebpf tag, but is a struct or a pointer
// to a struct, attempt to gather its fields as well.
var v reflect.Value
switch field.Type.Kind() {
case reflect.Ptr:
if field.Type.Elem().Kind() != reflect.Struct {
continue
}
if field.value.IsNil() {
return nil, fmt.Errorf("nil pointer to %s", structType)
}
// Obtain the destination type of the pointer.
v = field.value.Elem()
case reflect.Struct:
// Reference the value's type directly.
v = field.value
default:
continue
}
inner, err := ebpfFields(v, visited)
if err != nil {
return nil, fmt.Errorf("field %s: %w", field.Name, err)
}
fields = append(fields, inner...)
}
return fields, nil
}
// assignValues attempts to populate all fields of 'to' tagged with 'ebpf'.
//
// getValue is called for every tagged field of 'to' and must return the value
// to be assigned to the field with the given typ and name.
func assignValues(to interface{},
getValue func(typ reflect.Type, name string) (interface{}, error)) error {
toValue := reflect.ValueOf(to)
if toValue.Type().Kind() != reflect.Ptr {
return fmt.Errorf("%T is not a pointer to struct", to)
}
if toValue.IsNil() {
return fmt.Errorf("nil pointer to %T", to)
}
fields, err := ebpfFields(toValue.Elem(), nil)
if err != nil {
return err
}
type elem struct {
// Either *Map or *Program
typ reflect.Type
name string
}
assigned := make(map[elem]string)
for _, field := range fields {
// Get string value the field is tagged with.
tag := field.Tag.Get("ebpf")
if strings.Contains(tag, ",") {
return fmt.Errorf("field %s: ebpf tag contains a comma", field.Name)
}
// Check if the eBPF object with the requested
// type and tag was already assigned elsewhere.
e := elem{field.Type, tag}
if af := assigned[e]; af != "" {
return fmt.Errorf("field %s: object %q was already assigned to %s", field.Name, tag, af)
}
// Get the eBPF object referred to by the tag.
value, err := getValue(field.Type, tag)
if err != nil {
return fmt.Errorf("field %s: %w", field.Name, err)
}
if !field.value.CanSet() {
return fmt.Errorf("field %s: can't set value", field.Name)
}
field.value.Set(reflect.ValueOf(value))
assigned[e] = field.Name
}
return nil
}
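To make the LoadAndAssign flow above concrete, here is a hedged sketch of how a caller would typically use it. LoadCollectionSpec comes from elf_reader.go (whose diff is suppressed in this listing), and the object file name and the `ebpf` tag values ("trace.o", "trace_entry", "events") are illustrative, not part of this commit.

```go
package main

import (
	"log"

	"github.com/cilium/ebpf"
)

func main() {
	spec, err := ebpf.LoadCollectionSpec("trace.o")
	if err != nil {
		log.Fatal(err)
	}

	var objs struct {
		Prog   *ebpf.Program `ebpf:"trace_entry"`
		Events *ebpf.Map     `ebpf:"events"`
	}
	// opts may be nil, per the LoadAndAssign documentation above.
	if err := spec.LoadAndAssign(&objs, nil); err != nil {
		log.Fatal(err)
	}
	defer objs.Prog.Close()
	defer objs.Events.Close()
}
```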

16
vendor/github.com/cilium/ebpf/doc.go generated vendored Normal file

@ -0,0 +1,16 @@
// Package ebpf is a toolkit for working with eBPF programs.
//
// eBPF programs are small snippets of code which are executed directly
// in a VM in the Linux kernel, which makes them very fast and flexible.
// Many Linux subsystems now accept eBPF programs. This makes it possible
// to implement highly application specific logic inside the kernel,
// without having to modify the actual kernel itself.
//
// This package is designed for long-running processes which
// want to use eBPF to implement part of their application logic. It has no
// run-time dependencies outside of the library and the Linux kernel itself.
// eBPF code should be compiled ahead of time using clang, and shipped with
// your application as any other resource.
//
// Use the link subpackage to attach a loaded program to a hook in the kernel.
package ebpf
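A minimal entry-point sketch matching the workflow described in this package comment, using LoadCollection from collection.go above; the object file and program name are illustrative.

```go
package main

import (
	"log"

	"github.com/cilium/ebpf"
)

func main() {
	coll, err := ebpf.LoadCollection("trace.o")
	if err != nil {
		log.Fatal(err)
	}
	defer coll.Close()

	if coll.Programs["trace_entry"] == nil {
		log.Fatal("program trace_entry not found in object file")
	}
	// Attach the program via the link subpackage, run the workload, read maps, etc.
}
```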

1077
vendor/github.com/cilium/ebpf/elf_reader.go generated vendored Normal file

File diff suppressed because it is too large

22
vendor/github.com/cilium/ebpf/elf_reader_fuzz.go generated vendored Normal file

@ -0,0 +1,22 @@
//go:build gofuzz
// +build gofuzz
// Use with https://github.com/dvyukov/go-fuzz
package ebpf
import "bytes"
func FuzzLoadCollectionSpec(data []byte) int {
spec, err := LoadCollectionSpecFromReader(bytes.NewReader(data))
if err != nil {
if spec != nil {
panic("spec is not nil")
}
return 0
}
if spec == nil {
panic("spec is nil")
}
return 1
}

9
vendor/github.com/cilium/ebpf/go.mod generated vendored Normal file

@ -0,0 +1,9 @@
module github.com/cilium/ebpf
go 1.16
require (
github.com/frankban/quicktest v1.11.3
github.com/google/go-cmp v0.5.4
golang.org/x/sys v0.0.0-20210906170528-6f6e22806c34
)

13
vendor/github.com/cilium/ebpf/go.sum generated vendored Normal file

@ -0,0 +1,13 @@
github.com/frankban/quicktest v1.11.3 h1:8sXhOn0uLys67V8EsXLc6eszDs8VXWxL3iRvebPhedY=
github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k=
github.com/google/go-cmp v0.5.4 h1:L8R9j+yAqZuZjsqh/z+F1NCffTKKLShY6zXTItVIZ8M=
github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI=
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
golang.org/x/sys v0.0.0-20210906170528-6f6e22806c34 h1:GkvMjFtXUmahfDtashnc1mnrCtuBVcwse5QV2lUk/tI=
golang.org/x/sys v0.0.0-20210906170528-6f6e22806c34/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=

273
vendor/github.com/cilium/ebpf/info.go generated vendored Normal file

@ -0,0 +1,273 @@
package ebpf
import (
"bufio"
"encoding/hex"
"errors"
"fmt"
"io"
"os"
"strings"
"syscall"
"time"
"github.com/cilium/ebpf/internal"
"github.com/cilium/ebpf/internal/btf"
)
// MapInfo describes a map.
type MapInfo struct {
Type MapType
id MapID
KeySize uint32
ValueSize uint32
MaxEntries uint32
Flags uint32
// Name as supplied by user space at load time.
Name string
}
func newMapInfoFromFd(fd *internal.FD) (*MapInfo, error) {
info, err := bpfGetMapInfoByFD(fd)
if errors.Is(err, syscall.EINVAL) {
return newMapInfoFromProc(fd)
}
if err != nil {
return nil, err
}
return &MapInfo{
MapType(info.map_type),
MapID(info.id),
info.key_size,
info.value_size,
info.max_entries,
info.map_flags,
// name is available from 4.15.
internal.CString(info.name[:]),
}, nil
}
func newMapInfoFromProc(fd *internal.FD) (*MapInfo, error) {
var mi MapInfo
err := scanFdInfo(fd, map[string]interface{}{
"map_type": &mi.Type,
"key_size": &mi.KeySize,
"value_size": &mi.ValueSize,
"max_entries": &mi.MaxEntries,
"map_flags": &mi.Flags,
})
if err != nil {
return nil, err
}
return &mi, nil
}
// ID returns the map ID.
//
// Available from 4.13.
//
// The bool return value indicates whether this optional field is available.
func (mi *MapInfo) ID() (MapID, bool) {
return mi.id, mi.id > 0
}
// programStats holds statistics of a program.
type programStats struct {
// Total accumulated runtime of the program in ns.
runtime time.Duration
// Total number of times the program was called.
runCount uint64
}
// ProgramInfo describes a program.
type ProgramInfo struct {
Type ProgramType
id ProgramID
// Truncated hash of the BPF bytecode.
Tag string
// Name as supplied by user space at load time.
Name string
// BTF for the program.
btf btf.ID
// ids of maps related to the program.
ids []MapID
stats *programStats
}
func newProgramInfoFromFd(fd *internal.FD) (*ProgramInfo, error) {
info, err := bpfGetProgInfoByFD(fd, nil)
if errors.Is(err, syscall.EINVAL) {
return newProgramInfoFromProc(fd)
}
if err != nil {
return nil, err
}
var mapIDs []MapID
if info.nr_map_ids > 0 {
mapIDs = make([]MapID, info.nr_map_ids)
info, err = bpfGetProgInfoByFD(fd, mapIDs)
if err != nil {
return nil, err
}
}
return &ProgramInfo{
Type: ProgramType(info.prog_type),
id: ProgramID(info.id),
// tag is available if the kernel supports BPF_PROG_GET_INFO_BY_FD.
Tag: hex.EncodeToString(info.tag[:]),
// name is available from 4.15.
Name: internal.CString(info.name[:]),
btf: btf.ID(info.btf_id),
ids: mapIDs,
stats: &programStats{
runtime: time.Duration(info.run_time_ns),
runCount: info.run_cnt,
},
}, nil
}
func newProgramInfoFromProc(fd *internal.FD) (*ProgramInfo, error) {
var info ProgramInfo
err := scanFdInfo(fd, map[string]interface{}{
"prog_type": &info.Type,
"prog_tag": &info.Tag,
})
if errors.Is(err, errMissingFields) {
return nil, &internal.UnsupportedFeatureError{
Name: "reading program info from /proc/self/fdinfo",
MinimumVersion: internal.Version{4, 10, 0},
}
}
if err != nil {
return nil, err
}
return &info, nil
}
// ID returns the program ID.
//
// Available from 4.13.
//
// The bool return value indicates whether this optional field is available.
func (pi *ProgramInfo) ID() (ProgramID, bool) {
return pi.id, pi.id > 0
}
// BTFID returns the BTF ID associated with the program.
//
// Available from 5.0.
//
// The bool return value indicates whether this optional field is available and
// populated. (The field may be available but not populated if the kernel
// supports the field but the program was loaded without BTF information.)
func (pi *ProgramInfo) BTFID() (btf.ID, bool) {
return pi.btf, pi.btf > 0
}
// RunCount returns the total number of times the program was called.
//
// Can return 0 if the collection of statistics is not enabled. See EnableStats().
// The bool return value indicates whether this optional field is available.
func (pi *ProgramInfo) RunCount() (uint64, bool) {
if pi.stats != nil {
return pi.stats.runCount, true
}
return 0, false
}
// Runtime returns the total accumulated runtime of the program.
//
// Can return 0 if the collection of statistics is not enabled. See EnableStats().
// The bool return value indicates whether this optional field is available.
func (pi *ProgramInfo) Runtime() (time.Duration, bool) {
if pi.stats != nil {
return pi.stats.runtime, true
}
return time.Duration(0), false
}
// MapIDs returns the maps related to the program.
//
// The bool return value indicates whether this optional field is available.
func (pi *ProgramInfo) MapIDs() ([]MapID, bool) {
return pi.ids, pi.ids != nil
}
func scanFdInfo(fd *internal.FD, fields map[string]interface{}) error {
raw, err := fd.Value()
if err != nil {
return err
}
fh, err := os.Open(fmt.Sprintf("/proc/self/fdinfo/%d", raw))
if err != nil {
return err
}
defer fh.Close()
if err := scanFdInfoReader(fh, fields); err != nil {
return fmt.Errorf("%s: %w", fh.Name(), err)
}
return nil
}
var errMissingFields = errors.New("missing fields")
func scanFdInfoReader(r io.Reader, fields map[string]interface{}) error {
var (
scanner = bufio.NewScanner(r)
scanned int
)
for scanner.Scan() {
parts := strings.SplitN(scanner.Text(), "\t", 2)
if len(parts) != 2 {
continue
}
name := strings.TrimSuffix(parts[0], ":")
field, ok := fields[string(name)]
if !ok {
continue
}
if n, err := fmt.Sscanln(parts[1], field); err != nil || n != 1 {
return fmt.Errorf("can't parse field %s: %v", name, err)
}
scanned++
}
if err := scanner.Err(); err != nil {
return err
}
if scanned != len(fields) {
return errMissingFields
}
return nil
}
// EnableStats starts measuring the runtime
// and run counts of eBPF programs.
//
// Collecting statistics can have an impact on performance.
//
// Requires at least 5.8.
func EnableStats(which uint32) (io.Closer, error) {
attr := internal.BPFEnableStatsAttr{
StatsType: which,
}
fd, err := internal.BPFEnableStats(&attr)
if err != nil {
return nil, err
}
return fd, nil
}
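A hedged sketch of reading these statistics: EnableStats and the RunCount/Runtime accessors are defined above, while Program.Info, which produces the ProgramInfo, lives in prog.go elsewhere in this commit. The stats type value 0 corresponds to BPF_STATS_RUN_TIME in the kernel UAPI.

```go
package example

import (
	"fmt"
	"log"

	"github.com/cilium/ebpf"
)

func reportStats(prog *ebpf.Program) {
	// 0 == BPF_STATS_RUN_TIME; requires Linux >= 5.8.
	closer, err := ebpf.EnableStats(0)
	if err != nil {
		log.Fatal(err)
	}
	defer closer.Close()

	// ... run the program for a while ...

	info, err := prog.Info()
	if err != nil {
		log.Fatal(err)
	}
	if n, ok := info.RunCount(); ok {
		fmt.Println("run count:", n)
	}
	if d, ok := info.Runtime(); ok {
		fmt.Println("total runtime:", d)
	}
}
```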

6
vendor/github.com/cilium/ebpf/internal/align.go generated vendored Normal file

@ -0,0 +1,6 @@
package internal
// Align returns 'n' updated to 'alignment' boundary.
func Align(n, alignment int) int {
return (int(n) + alignment - 1) / alignment * alignment
}
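Worked example of the rounding arithmetic; since the internal package cannot be imported from outside the module, the sketch mirrors the function rather than importing it.

```go
package main

import "fmt"

// align mirrors internal.Align from the hunk above.
func align(n, alignment int) int {
	return (n + alignment - 1) / alignment * alignment
}

func main() {
	fmt.Println(align(13, 8)) // 16: rounded up to the next multiple of 8
	fmt.Println(align(16, 8)) // 16: already aligned
}
```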

798
vendor/github.com/cilium/ebpf/internal/btf/btf.go generated vendored Normal file

@ -0,0 +1,798 @@
package btf
import (
"bytes"
"debug/elf"
"encoding/binary"
"errors"
"fmt"
"io"
"math"
"os"
"reflect"
"sync"
"unsafe"
"github.com/cilium/ebpf/internal"
"github.com/cilium/ebpf/internal/unix"
)
const btfMagic = 0xeB9F
// Errors returned by BTF functions.
var (
ErrNotSupported = internal.ErrNotSupported
ErrNotFound = errors.New("not found")
ErrNoExtendedInfo = errors.New("no extended info")
)
// ID represents the unique ID of a BTF object.
type ID uint32
// Spec represents decoded BTF.
type Spec struct {
rawTypes []rawType
strings stringTable
types []Type
namedTypes map[string][]NamedType
funcInfos map[string]extInfo
lineInfos map[string]extInfo
coreRelos map[string]coreRelos
byteOrder binary.ByteOrder
}
type btfHeader struct {
Magic uint16
Version uint8
Flags uint8
HdrLen uint32
TypeOff uint32
TypeLen uint32
StringOff uint32
StringLen uint32
}
// LoadSpecFromReader reads BTF sections from an ELF.
//
// Returns ErrNotFound if the reader contains no BTF.
func LoadSpecFromReader(rd io.ReaderAt) (*Spec, error) {
file, err := internal.NewSafeELFFile(rd)
if err != nil {
return nil, err
}
defer file.Close()
symbols, err := file.Symbols()
if err != nil {
return nil, fmt.Errorf("can't read symbols: %v", err)
}
variableOffsets := make(map[variable]uint32)
for _, symbol := range symbols {
if idx := symbol.Section; idx >= elf.SHN_LORESERVE && idx <= elf.SHN_HIRESERVE {
// Ignore things like SHN_ABS
continue
}
if int(symbol.Section) >= len(file.Sections) {
return nil, fmt.Errorf("symbol %s: invalid section %d", symbol.Name, symbol.Section)
}
secName := file.Sections[symbol.Section].Name
if symbol.Value > math.MaxUint32 {
return nil, fmt.Errorf("section %s: symbol %s: size exceeds maximum", secName, symbol.Name)
}
variableOffsets[variable{secName, symbol.Name}] = uint32(symbol.Value)
}
return loadSpecFromELF(file, variableOffsets)
}
func loadSpecFromELF(file *internal.SafeELFFile, variableOffsets map[variable]uint32) (*Spec, error) {
var (
btfSection *elf.Section
btfExtSection *elf.Section
sectionSizes = make(map[string]uint32)
)
for _, sec := range file.Sections {
switch sec.Name {
case ".BTF":
btfSection = sec
case ".BTF.ext":
btfExtSection = sec
default:
if sec.Type != elf.SHT_PROGBITS && sec.Type != elf.SHT_NOBITS {
break
}
if sec.Size > math.MaxUint32 {
return nil, fmt.Errorf("section %s exceeds maximum size", sec.Name)
}
sectionSizes[sec.Name] = uint32(sec.Size)
}
}
if btfSection == nil {
return nil, fmt.Errorf("btf: %w", ErrNotFound)
}
spec, err := loadRawSpec(btfSection.Open(), file.ByteOrder, sectionSizes, variableOffsets)
if err != nil {
return nil, err
}
if btfExtSection == nil {
return spec, nil
}
spec.funcInfos, spec.lineInfos, spec.coreRelos, err = parseExtInfos(btfExtSection.Open(), file.ByteOrder, spec.strings)
if err != nil {
return nil, fmt.Errorf("can't read ext info: %w", err)
}
return spec, nil
}
// LoadRawSpec reads a blob of BTF data that isn't wrapped in an ELF file.
//
// Prefer using LoadSpecFromReader, since this function only supports a subset
// of BTF.
func LoadRawSpec(btf io.Reader, bo binary.ByteOrder) (*Spec, error) {
// This will return an error if we encounter a Datasec, since we can't fix
// it up.
return loadRawSpec(btf, bo, nil, nil)
}
func loadRawSpec(btf io.Reader, bo binary.ByteOrder, sectionSizes map[string]uint32, variableOffsets map[variable]uint32) (*Spec, error) {
rawTypes, rawStrings, err := parseBTF(btf, bo)
if err != nil {
return nil, err
}
err = fixupDatasec(rawTypes, rawStrings, sectionSizes, variableOffsets)
if err != nil {
return nil, err
}
types, typesByName, err := inflateRawTypes(rawTypes, rawStrings)
if err != nil {
return nil, err
}
return &Spec{
rawTypes: rawTypes,
namedTypes: typesByName,
types: types,
strings: rawStrings,
byteOrder: bo,
}, nil
}
var kernelBTF struct {
sync.Mutex
*Spec
}
// LoadKernelSpec returns the current kernel's BTF information.
//
// Requires a >= 5.5 kernel with CONFIG_DEBUG_INFO_BTF enabled. Returns
// ErrNotSupported if BTF is not enabled.
func LoadKernelSpec() (*Spec, error) {
kernelBTF.Lock()
defer kernelBTF.Unlock()
if kernelBTF.Spec != nil {
return kernelBTF.Spec, nil
}
var err error
kernelBTF.Spec, err = loadKernelSpec()
return kernelBTF.Spec, err
}
func loadKernelSpec() (*Spec, error) {
release, err := unix.KernelRelease()
if err != nil {
return nil, fmt.Errorf("can't read kernel release number: %w", err)
}
fh, err := os.Open("/sys/kernel/btf/vmlinux")
if err == nil {
defer fh.Close()
return LoadRawSpec(fh, internal.NativeEndian)
}
// use same list of locations as libbpf
// https://github.com/libbpf/libbpf/blob/9a3a42608dbe3731256a5682a125ac1e23bced8f/src/btf.c#L3114-L3122
locations := []string{
"/boot/vmlinux-%s",
"/lib/modules/%s/vmlinux-%[1]s",
"/lib/modules/%s/build/vmlinux",
"/usr/lib/modules/%s/kernel/vmlinux",
"/usr/lib/debug/boot/vmlinux-%s",
"/usr/lib/debug/boot/vmlinux-%s.debug",
"/usr/lib/debug/lib/modules/%s/vmlinux",
}
for _, loc := range locations {
path := fmt.Sprintf(loc, release)
fh, err := os.Open(path)
if err != nil {
continue
}
defer fh.Close()
file, err := internal.NewSafeELFFile(fh)
if err != nil {
return nil, err
}
defer file.Close()
return loadSpecFromELF(file, nil)
}
return nil, fmt.Errorf("no BTF for kernel version %s: %w", release, internal.ErrNotSupported)
}
func parseBTF(btf io.Reader, bo binary.ByteOrder) ([]rawType, stringTable, error) {
rawBTF, err := io.ReadAll(btf)
if err != nil {
return nil, nil, fmt.Errorf("can't read BTF: %v", err)
}
rd := bytes.NewReader(rawBTF)
var header btfHeader
if err := binary.Read(rd, bo, &header); err != nil {
return nil, nil, fmt.Errorf("can't read header: %v", err)
}
if header.Magic != btfMagic {
return nil, nil, fmt.Errorf("incorrect magic value %v", header.Magic)
}
if header.Version != 1 {
return nil, nil, fmt.Errorf("unexpected version %v", header.Version)
}
if header.Flags != 0 {
return nil, nil, fmt.Errorf("unsupported flags %v", header.Flags)
}
remainder := int64(header.HdrLen) - int64(binary.Size(&header))
if remainder < 0 {
return nil, nil, errors.New("header is too short")
}
if _, err := io.CopyN(internal.DiscardZeroes{}, rd, remainder); err != nil {
return nil, nil, fmt.Errorf("header padding: %v", err)
}
if _, err := rd.Seek(int64(header.HdrLen+header.StringOff), io.SeekStart); err != nil {
return nil, nil, fmt.Errorf("can't seek to start of string section: %v", err)
}
rawStrings, err := readStringTable(io.LimitReader(rd, int64(header.StringLen)))
if err != nil {
return nil, nil, fmt.Errorf("can't read type names: %w", err)
}
if _, err := rd.Seek(int64(header.HdrLen+header.TypeOff), io.SeekStart); err != nil {
return nil, nil, fmt.Errorf("can't seek to start of type section: %v", err)
}
rawTypes, err := readTypes(io.LimitReader(rd, int64(header.TypeLen)), bo)
if err != nil {
return nil, nil, fmt.Errorf("can't read types: %w", err)
}
return rawTypes, rawStrings, nil
}
type variable struct {
section string
name string
}
func fixupDatasec(rawTypes []rawType, rawStrings stringTable, sectionSizes map[string]uint32, variableOffsets map[variable]uint32) error {
for i, rawType := range rawTypes {
if rawType.Kind() != kindDatasec {
continue
}
name, err := rawStrings.Lookup(rawType.NameOff)
if err != nil {
return err
}
if name == ".kconfig" || name == ".ksyms" {
return fmt.Errorf("reference to %s: %w", name, ErrNotSupported)
}
if rawTypes[i].SizeType != 0 {
continue
}
size, ok := sectionSizes[name]
if !ok {
return fmt.Errorf("data section %s: missing size", name)
}
rawTypes[i].SizeType = size
secinfos := rawType.data.([]btfVarSecinfo)
for j, secInfo := range secinfos {
id := int(secInfo.Type - 1)
if id >= len(rawTypes) {
return fmt.Errorf("data section %s: invalid type id %d for variable %d", name, id, j)
}
varName, err := rawStrings.Lookup(rawTypes[id].NameOff)
if err != nil {
return fmt.Errorf("data section %s: can't get name for type %d: %w", name, id, err)
}
offset, ok := variableOffsets[variable{name, varName}]
if !ok {
return fmt.Errorf("data section %s: missing offset for variable %s", name, varName)
}
secinfos[j].Offset = offset
}
}
return nil
}
// Copy creates a copy of Spec.
func (s *Spec) Copy() *Spec {
types, _ := copyTypes(s.types, nil)
namedTypes := make(map[string][]NamedType)
for _, typ := range types {
if named, ok := typ.(NamedType); ok {
name := essentialName(named.TypeName())
namedTypes[name] = append(namedTypes[name], named)
}
}
// NB: Other parts of spec are not copied since they are immutable.
return &Spec{
s.rawTypes,
s.strings,
types,
namedTypes,
s.funcInfos,
s.lineInfos,
s.coreRelos,
s.byteOrder,
}
}
type marshalOpts struct {
ByteOrder binary.ByteOrder
StripFuncLinkage bool
}
func (s *Spec) marshal(opts marshalOpts) ([]byte, error) {
var (
buf bytes.Buffer
header = new(btfHeader)
headerLen = binary.Size(header)
)
// Reserve space for the header. We have to write it last since
// we don't know the size of the type section yet.
_, _ = buf.Write(make([]byte, headerLen))
// Write type section, just after the header.
for _, raw := range s.rawTypes {
switch {
case opts.StripFuncLinkage && raw.Kind() == kindFunc:
raw.SetLinkage(StaticFunc)
}
if err := raw.Marshal(&buf, opts.ByteOrder); err != nil {
return nil, fmt.Errorf("can't marshal BTF: %w", err)
}
}
typeLen := uint32(buf.Len() - headerLen)
// Write string section after type section.
_, _ = buf.Write(s.strings)
// Fill out the header, and write it out.
header = &btfHeader{
Magic: btfMagic,
Version: 1,
Flags: 0,
HdrLen: uint32(headerLen),
TypeOff: 0,
TypeLen: typeLen,
StringOff: typeLen,
StringLen: uint32(len(s.strings)),
}
raw := buf.Bytes()
err := binary.Write(sliceWriter(raw[:headerLen]), opts.ByteOrder, header)
if err != nil {
return nil, fmt.Errorf("can't write header: %v", err)
}
return raw, nil
}
type sliceWriter []byte
func (sw sliceWriter) Write(p []byte) (int, error) {
if len(p) != len(sw) {
return 0, errors.New("size doesn't match")
}
return copy(sw, p), nil
}
// Program finds the BTF for a specific section.
//
// Length is the number of bytes in the raw BPF instruction stream.
//
// Returns an error which may wrap ErrNoExtendedInfo if the Spec doesn't
// contain extended BTF info.
func (s *Spec) Program(name string, length uint64) (*Program, error) {
if length == 0 {
return nil, errors.New("length musn't be zero")
}
if s.funcInfos == nil && s.lineInfos == nil && s.coreRelos == nil {
return nil, fmt.Errorf("BTF for section %s: %w", name, ErrNoExtendedInfo)
}
funcInfos, funcOK := s.funcInfos[name]
lineInfos, lineOK := s.lineInfos[name]
relos, coreOK := s.coreRelos[name]
if !funcOK && !lineOK && !coreOK {
return nil, fmt.Errorf("no extended BTF info for section %s", name)
}
return &Program{s, length, funcInfos, lineInfos, relos}, nil
}
// FindType searches for a type with a specific name.
//
// Given a type T that satisfies Type, typ must be a non-nil **T.
// On success, the address of the found type is copied to typ.
//
// Returns an error wrapping ErrNotFound if no matching
// type exists in spec.
func (s *Spec) FindType(name string, typ interface{}) error {
typValue := reflect.ValueOf(typ)
if typValue.Kind() != reflect.Ptr {
return fmt.Errorf("%T is not a pointer", typ)
}
typPtr := typValue.Elem()
if !typPtr.CanSet() {
return fmt.Errorf("%T cannot be set", typ)
}
wanted := typPtr.Type()
if !wanted.AssignableTo(reflect.TypeOf((*Type)(nil)).Elem()) {
return fmt.Errorf("%T does not satisfy Type interface", typ)
}
var candidate Type
for _, typ := range s.namedTypes[essentialName(name)] {
if reflect.TypeOf(typ) != wanted {
continue
}
// Match against the full name, not just the essential one.
if typ.TypeName() != name {
continue
}
if candidate != nil {
return fmt.Errorf("type %s: multiple candidates for %T", name, typ)
}
candidate = typ
}
if candidate == nil {
return fmt.Errorf("type %s: %w", name, ErrNotFound)
}
typPtr.Set(reflect.ValueOf(candidate))
return nil
}
// Handle is a reference to BTF loaded into the kernel.
type Handle struct {
spec *Spec
fd *internal.FD
}
// NewHandle loads BTF into the kernel.
//
// Returns ErrNotSupported if BTF is not supported.
func NewHandle(spec *Spec) (*Handle, error) {
if err := haveBTF(); err != nil {
return nil, err
}
if spec.byteOrder != internal.NativeEndian {
return nil, fmt.Errorf("can't load %s BTF on %s", spec.byteOrder, internal.NativeEndian)
}
btf, err := spec.marshal(marshalOpts{
ByteOrder: internal.NativeEndian,
StripFuncLinkage: haveFuncLinkage() != nil,
})
if err != nil {
return nil, fmt.Errorf("can't marshal BTF: %w", err)
}
if uint64(len(btf)) > math.MaxUint32 {
return nil, errors.New("BTF exceeds the maximum size")
}
attr := &bpfLoadBTFAttr{
btf: internal.NewSlicePointer(btf),
btfSize: uint32(len(btf)),
}
fd, err := bpfLoadBTF(attr)
if err != nil {
logBuf := make([]byte, 64*1024)
attr.logBuf = internal.NewSlicePointer(logBuf)
attr.btfLogSize = uint32(len(logBuf))
attr.btfLogLevel = 1
_, logErr := bpfLoadBTF(attr)
return nil, internal.ErrorWithLog(err, logBuf, logErr)
}
return &Handle{spec.Copy(), fd}, nil
}
// NewHandleFromID returns the BTF handle for a given id.
//
// Returns ErrNotExist, if there is no BTF with the given id.
//
// Requires CAP_SYS_ADMIN.
func NewHandleFromID(id ID) (*Handle, error) {
fd, err := internal.BPFObjGetFDByID(internal.BPF_BTF_GET_FD_BY_ID, uint32(id))
if err != nil {
return nil, fmt.Errorf("get BTF by id: %w", err)
}
info, err := newInfoFromFd(fd)
if err != nil {
_ = fd.Close()
return nil, fmt.Errorf("get BTF spec for handle: %w", err)
}
return &Handle{info.BTF, fd}, nil
}
// Spec returns the Spec that defined the BTF loaded into the kernel.
func (h *Handle) Spec() *Spec {
return h.spec
}
// Close destroys the handle.
//
// Subsequent calls to FD will return an invalid value.
func (h *Handle) Close() error {
return h.fd.Close()
}
// FD returns the file descriptor for the handle.
func (h *Handle) FD() int {
value, err := h.fd.Value()
if err != nil {
return -1
}
return int(value)
}
// Map is the BTF for a map.
type Map struct {
Spec *Spec
Key, Value Type
}
// Program is the BTF information for a stream of instructions.
type Program struct {
spec *Spec
length uint64
funcInfos, lineInfos extInfo
coreRelos coreRelos
}
// Spec returns the BTF spec of this program.
func (p *Program) Spec() *Spec {
return p.spec
}
// Append the information from other to the Program.
func (p *Program) Append(other *Program) error {
if other.spec != p.spec {
return fmt.Errorf("can't append program with different BTF specs")
}
funcInfos, err := p.funcInfos.append(other.funcInfos, p.length)
if err != nil {
return fmt.Errorf("func infos: %w", err)
}
lineInfos, err := p.lineInfos.append(other.lineInfos, p.length)
if err != nil {
return fmt.Errorf("line infos: %w", err)
}
p.funcInfos = funcInfos
p.lineInfos = lineInfos
p.coreRelos = p.coreRelos.append(other.coreRelos, p.length)
p.length += other.length
return nil
}
// FuncInfos returns the binary form of BTF function infos.
func (p *Program) FuncInfos() (recordSize uint32, bytes []byte, err error) {
bytes, err = p.funcInfos.MarshalBinary()
if err != nil {
return 0, nil, fmt.Errorf("func infos: %w", err)
}
return p.funcInfos.recordSize, bytes, nil
}
// LineInfos returns the binary form of BTF line infos.
func (p *Program) LineInfos() (recordSize uint32, bytes []byte, err error) {
bytes, err = p.lineInfos.MarshalBinary()
if err != nil {
return 0, nil, fmt.Errorf("line infos: %w", err)
}
return p.lineInfos.recordSize, bytes, nil
}
// Fixups returns the changes required to adjust the program to the target.
//
// Passing a nil target will relocate against the running kernel.
func (p *Program) Fixups(target *Spec) (COREFixups, error) {
if len(p.coreRelos) == 0 {
return nil, nil
}
if target == nil {
var err error
target, err = LoadKernelSpec()
if err != nil {
return nil, err
}
}
return coreRelocate(p.spec, target, p.coreRelos)
}
type bpfLoadBTFAttr struct {
btf internal.Pointer
logBuf internal.Pointer
btfSize uint32
btfLogSize uint32
btfLogLevel uint32
}
func bpfLoadBTF(attr *bpfLoadBTFAttr) (*internal.FD, error) {
fd, err := internal.BPF(internal.BPF_BTF_LOAD, unsafe.Pointer(attr), unsafe.Sizeof(*attr))
if err != nil {
return nil, err
}
return internal.NewFD(uint32(fd)), nil
}
func marshalBTF(types interface{}, strings []byte, bo binary.ByteOrder) []byte {
const minHeaderLength = 24
typesLen := uint32(binary.Size(types))
header := btfHeader{
Magic: btfMagic,
Version: 1,
HdrLen: minHeaderLength,
TypeOff: 0,
TypeLen: typesLen,
StringOff: typesLen,
StringLen: uint32(len(strings)),
}
buf := new(bytes.Buffer)
_ = binary.Write(buf, bo, &header)
_ = binary.Write(buf, bo, types)
buf.Write(strings)
return buf.Bytes()
}
var haveBTF = internal.FeatureTest("BTF", "5.1", func() error {
var (
types struct {
Integer btfType
Var btfType
btfVar struct{ Linkage uint32 }
}
strings = []byte{0, 'a', 0}
)
// We use a BTF_KIND_VAR here, to make sure that
// the kernel understands BTF at least as well as we
// do. BTF_KIND_VAR was introduced ~5.1.
types.Integer.SetKind(kindPointer)
types.Var.NameOff = 1
types.Var.SetKind(kindVar)
types.Var.SizeType = 1
btf := marshalBTF(&types, strings, internal.NativeEndian)
fd, err := bpfLoadBTF(&bpfLoadBTFAttr{
btf: internal.NewSlicePointer(btf),
btfSize: uint32(len(btf)),
})
if errors.Is(err, unix.EINVAL) || errors.Is(err, unix.EPERM) {
// Treat both EINVAL and EPERM as not supported: loading the program
// might still succeed without BTF.
return internal.ErrNotSupported
}
if err != nil {
return err
}
fd.Close()
return nil
})
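// exampleProbeBTF is an illustrative sketch (hypothetical helper, not part of
// the package API). FeatureTest-wrapped probes such as haveBTF cache their
// result, so callers can cheaply check for kernel BTF support up front.
func exampleProbeBTF() bool {
    err := haveBTF()
    if errors.Is(err, internal.ErrNotSupported) {
        return false // kernel lacks BTF support, or the probe was not permitted
    }
    return err == nil
}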
var haveFuncLinkage = internal.FeatureTest("BTF func linkage", "5.6", func() error {
if err := haveBTF(); err != nil {
return err
}
var (
types struct {
FuncProto btfType
Func btfType
}
strings = []byte{0, 'a', 0}
)
types.FuncProto.SetKind(kindFuncProto)
types.Func.SetKind(kindFunc)
types.Func.SizeType = 1 // aka FuncProto
types.Func.NameOff = 1
types.Func.SetLinkage(GlobalFunc)
btf := marshalBTF(&types, strings, internal.NativeEndian)
fd, err := bpfLoadBTF(&bpfLoadBTFAttr{
btf: internal.NewSlicePointer(btf),
btfSize: uint32(len(btf)),
})
if errors.Is(err, unix.EINVAL) {
return internal.ErrNotSupported
}
if err != nil {
return err
}
fd.Close()
return nil
})

287
vendor/github.com/cilium/ebpf/internal/btf/btf_types.go generated vendored Normal file

@ -0,0 +1,287 @@
package btf
import (
"encoding/binary"
"fmt"
"io"
)
//go:generate stringer -linecomment -output=btf_types_string.go -type=FuncLinkage,VarLinkage
// btfKind describes a Type.
type btfKind uint8
// Equivalents of the BTF_KIND_* constants.
const (
kindUnknown btfKind = iota
kindInt
kindPointer
kindArray
kindStruct
kindUnion
kindEnum
kindForward
kindTypedef
kindVolatile
kindConst
kindRestrict
// Added ~4.20
kindFunc
kindFuncProto
// Added ~5.1
kindVar
kindDatasec
// Added ~5.13
kindFloat
)
// FuncLinkage describes BTF function linkage metadata.
type FuncLinkage int
// Equivalent of enum btf_func_linkage.
const (
StaticFunc FuncLinkage = iota // static
GlobalFunc // global
ExternFunc // extern
)
// VarLinkage describes BTF variable linkage metadata.
type VarLinkage int
const (
StaticVar VarLinkage = iota // static
GlobalVar // global
ExternVar // extern
)
const (
btfTypeKindShift = 24
btfTypeKindLen = 5
btfTypeVlenShift = 0
btfTypeVlenMask = 16
btfTypeKindFlagShift = 31
btfTypeKindFlagMask = 1
)
// btfType is equivalent to struct btf_type in Documentation/bpf/btf.rst.
type btfType struct {
NameOff uint32
/* "info" bits arrangement
* bits 0-15: vlen (e.g. # of struct's members), linkage
* bits 16-23: unused
* bits 24-28: kind (e.g. int, ptr, array...etc)
* bits 29-30: unused
* bit 31: kind_flag, currently used by
* struct, union and fwd
*/
Info uint32
/* "size" is used by INT, ENUM, STRUCT and UNION.
* "size" tells the size of the type it is describing.
*
* "type" is used by PTR, TYPEDEF, VOLATILE, CONST, RESTRICT,
* FUNC and FUNC_PROTO.
* "type" is a type_id referring to another type.
*/
SizeType uint32
}
func (k btfKind) String() string {
switch k {
case kindUnknown:
return "Unknown"
case kindInt:
return "Integer"
case kindPointer:
return "Pointer"
case kindArray:
return "Array"
case kindStruct:
return "Struct"
case kindUnion:
return "Union"
case kindEnum:
return "Enumeration"
case kindForward:
return "Forward"
case kindTypedef:
return "Typedef"
case kindVolatile:
return "Volatile"
case kindConst:
return "Const"
case kindRestrict:
return "Restrict"
case kindFunc:
return "Function"
case kindFuncProto:
return "Function Proto"
case kindVar:
return "Variable"
case kindDatasec:
return "Section"
case kindFloat:
return "Float"
default:
return fmt.Sprintf("Unknown (%d)", k)
}
}
func mask(len uint32) uint32 {
return (1 << len) - 1
}
func (bt *btfType) info(len, shift uint32) uint32 {
return (bt.Info >> shift) & mask(len)
}
func (bt *btfType) setInfo(value, len, shift uint32) {
bt.Info &^= mask(len) << shift
bt.Info |= (value & mask(len)) << shift
}
func (bt *btfType) Kind() btfKind {
return btfKind(bt.info(btfTypeKindLen, btfTypeKindShift))
}
func (bt *btfType) SetKind(kind btfKind) {
bt.setInfo(uint32(kind), btfTypeKindLen, btfTypeKindShift)
}
func (bt *btfType) Vlen() int {
return int(bt.info(btfTypeVlenMask, btfTypeVlenShift))
}
func (bt *btfType) SetVlen(vlen int) {
bt.setInfo(uint32(vlen), btfTypeVlenMask, btfTypeVlenShift)
}
func (bt *btfType) KindFlag() bool {
return bt.info(btfTypeKindFlagMask, btfTypeKindFlagShift) == 1
}
func (bt *btfType) Linkage() FuncLinkage {
return FuncLinkage(bt.info(btfTypeVlenMask, btfTypeVlenShift))
}
func (bt *btfType) SetLinkage(linkage FuncLinkage) {
bt.setInfo(uint32(linkage), btfTypeVlenMask, btfTypeVlenShift)
}
func (bt *btfType) Type() TypeID {
// TODO: Panic here if wrong kind?
return TypeID(bt.SizeType)
}
func (bt *btfType) Size() uint32 {
// TODO: Panic here if wrong kind?
return bt.SizeType
}
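// exampleInfoPacking is an illustrative sketch (hypothetical helper, not part
// of the package API). It shows how Info packs the kind into bits 24-28 and
// the vlen into the low 16 bits, and that the accessors round-trip the values.
func exampleInfoPacking() btfType {
    var bt btfType
    bt.SetKind(kindStruct) // stored at btfTypeKindShift
    bt.SetVlen(3)          // e.g. a struct with three members
    _ = bt.Kind() == kindStruct && bt.Vlen() == 3
    return bt
}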
type rawType struct {
btfType
data interface{}
}
func (rt *rawType) Marshal(w io.Writer, bo binary.ByteOrder) error {
if err := binary.Write(w, bo, &rt.btfType); err != nil {
return err
}
if rt.data == nil {
return nil
}
return binary.Write(w, bo, rt.data)
}
type btfArray struct {
Type TypeID
IndexType TypeID
Nelems uint32
}
type btfMember struct {
NameOff uint32
Type TypeID
Offset uint32
}
type btfVarSecinfo struct {
Type TypeID
Offset uint32
Size uint32
}
type btfVariable struct {
Linkage uint32
}
type btfEnum struct {
NameOff uint32
Val int32
}
type btfParam struct {
NameOff uint32
Type TypeID
}
func readTypes(r io.Reader, bo binary.ByteOrder) ([]rawType, error) {
var (
header btfType
types []rawType
)
for id := TypeID(1); ; id++ {
if err := binary.Read(r, bo, &header); err == io.EOF {
return types, nil
} else if err != nil {
return nil, fmt.Errorf("can't read type info for id %v: %v", id, err)
}
var data interface{}
switch header.Kind() {
case kindInt:
data = new(uint32)
case kindPointer:
case kindArray:
data = new(btfArray)
case kindStruct:
fallthrough
case kindUnion:
data = make([]btfMember, header.Vlen())
case kindEnum:
data = make([]btfEnum, header.Vlen())
case kindForward:
case kindTypedef:
case kindVolatile:
case kindConst:
case kindRestrict:
case kindFunc:
case kindFuncProto:
data = make([]btfParam, header.Vlen())
case kindVar:
data = new(btfVariable)
case kindDatasec:
data = make([]btfVarSecinfo, header.Vlen())
case kindFloat:
default:
return nil, fmt.Errorf("type id %v: unknown kind: %v", id, header.Kind())
}
if data == nil {
types = append(types, rawType{header, nil})
continue
}
if err := binary.Read(r, bo, data); err != nil {
return nil, fmt.Errorf("type id %d: kind %v: can't read %T: %v", id, header.Kind(), data, err)
}
types = append(types, rawType{header, data})
}
}
func intEncoding(raw uint32) (IntEncoding, uint32, byte) {
return IntEncoding((raw & 0x0f000000) >> 24), (raw & 0x00ff0000) >> 16, byte(raw & 0x000000ff)
}
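// exampleIntEncoding is an illustrative sketch (hypothetical helper, not part
// of the package API). A raw BTF_KIND_INT descriptor of 0x01000040 carries the
// encoding in bits 24-27, the bit offset in bits 16-23 and the width in the
// low byte, so it decodes to a signed, 64-bit wide integer at offset 0.
func exampleIntEncoding() {
    enc, offset, bits := intEncoding(0x01000040)
    _ = enc == Signed && offset == 0 && bits == 64
}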

44
vendor/github.com/cilium/ebpf/internal/btf/btf_types_string.go generated vendored Normal file

@ -0,0 +1,44 @@
// Code generated by "stringer -linecomment -output=btf_types_string.go -type=FuncLinkage,VarLinkage"; DO NOT EDIT.
package btf
import "strconv"
func _() {
// An "invalid array index" compiler error signifies that the constant values have changed.
// Re-run the stringer command to generate them again.
var x [1]struct{}
_ = x[StaticFunc-0]
_ = x[GlobalFunc-1]
_ = x[ExternFunc-2]
}
const _FuncLinkage_name = "staticglobalextern"
var _FuncLinkage_index = [...]uint8{0, 6, 12, 18}
func (i FuncLinkage) String() string {
if i < 0 || i >= FuncLinkage(len(_FuncLinkage_index)-1) {
return "FuncLinkage(" + strconv.FormatInt(int64(i), 10) + ")"
}
return _FuncLinkage_name[_FuncLinkage_index[i]:_FuncLinkage_index[i+1]]
}
func _() {
// An "invalid array index" compiler error signifies that the constant values have changed.
// Re-run the stringer command to generate them again.
var x [1]struct{}
_ = x[StaticVar-0]
_ = x[GlobalVar-1]
_ = x[ExternVar-2]
}
const _VarLinkage_name = "staticglobalextern"
var _VarLinkage_index = [...]uint8{0, 6, 12, 18}
func (i VarLinkage) String() string {
if i < 0 || i >= VarLinkage(len(_VarLinkage_index)-1) {
return "VarLinkage(" + strconv.FormatInt(int64(i), 10) + ")"
}
return _VarLinkage_name[_VarLinkage_index[i]:_VarLinkage_index[i+1]]
}

888
vendor/github.com/cilium/ebpf/internal/btf/core.go generated vendored Normal file

@ -0,0 +1,888 @@
package btf
import (
"errors"
"fmt"
"math"
"reflect"
"sort"
"strconv"
"strings"
"github.com/cilium/ebpf/asm"
)
// Code in this file is derived from libbpf, which is available under a BSD
// 2-Clause license.
// COREFixup is the result of computing a CO-RE relocation for a target.
type COREFixup struct {
Kind COREKind
Local uint32
Target uint32
Poison bool
}
func (f COREFixup) equal(other COREFixup) bool {
return f.Local == other.Local && f.Target == other.Target
}
func (f COREFixup) String() string {
if f.Poison {
return fmt.Sprintf("%s=poison", f.Kind)
}
return fmt.Sprintf("%s=%d->%d", f.Kind, f.Local, f.Target)
}
func (f COREFixup) apply(ins *asm.Instruction) error {
if f.Poison {
return errors.New("can't poison individual instruction")
}
switch class := ins.OpCode.Class(); class {
case asm.LdXClass, asm.StClass, asm.StXClass:
if want := int16(f.Local); want != ins.Offset {
return fmt.Errorf("invalid offset %d, expected %d", ins.Offset, want)
}
if f.Target > math.MaxInt16 {
return fmt.Errorf("offset %d exceeds MaxInt16", f.Target)
}
ins.Offset = int16(f.Target)
case asm.LdClass:
if !ins.IsConstantLoad(asm.DWord) {
return fmt.Errorf("not a dword-sized immediate load")
}
if want := int64(f.Local); want != ins.Constant {
return fmt.Errorf("invalid immediate %d, expected %d", ins.Constant, want)
}
ins.Constant = int64(f.Target)
case asm.ALUClass:
if ins.OpCode.ALUOp() == asm.Swap {
return fmt.Errorf("relocation against swap")
}
fallthrough
case asm.ALU64Class:
if src := ins.OpCode.Source(); src != asm.ImmSource {
return fmt.Errorf("invalid source %s", src)
}
if want := int64(f.Local); want != ins.Constant {
return fmt.Errorf("invalid immediate %d, expected %d", ins.Constant, want)
}
if f.Target > math.MaxInt32 {
return fmt.Errorf("immediate %d exceeds MaxInt32", f.Target)
}
ins.Constant = int64(f.Target)
default:
return fmt.Errorf("invalid class %s", class)
}
return nil
}
func (f COREFixup) isNonExistant() bool {
return f.Kind.checksForExistence() && f.Target == 0
}
type COREFixups map[uint64]COREFixup
// Apply a set of CO-RE relocations to a BPF program.
func (fs COREFixups) Apply(insns asm.Instructions) (asm.Instructions, error) {
if len(fs) == 0 {
cpy := make(asm.Instructions, len(insns))
copy(cpy, insns)
return insns, nil
}
cpy := make(asm.Instructions, 0, len(insns))
iter := insns.Iterate()
for iter.Next() {
fixup, ok := fs[iter.Offset.Bytes()]
if !ok {
cpy = append(cpy, *iter.Ins)
continue
}
ins := *iter.Ins
if fixup.Poison {
const badRelo = asm.BuiltinFunc(0xbad2310)
cpy = append(cpy, badRelo.Call())
if ins.OpCode.IsDWordLoad() {
// 64 bit constant loads occupy two raw bpf instructions, so
// we need to add another instruction as padding.
cpy = append(cpy, badRelo.Call())
}
continue
}
if err := fixup.apply(&ins); err != nil {
return nil, fmt.Errorf("instruction %d, offset %d: %s: %w", iter.Index, iter.Offset.Bytes(), fixup.Kind, err)
}
cpy = append(cpy, ins)
}
return cpy, nil
}
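// exampleApplyFixups is an illustrative sketch (hypothetical helper, not part
// of the package API). The usual flow is to compute fixups for a program, with
// nil meaning "relocate against the running kernel", and then rewrite the
// instructions with Apply before loading them.
func exampleApplyFixups(p *Program, insns asm.Instructions) (asm.Instructions, error) {
    fixups, err := p.Fixups(nil)
    if err != nil {
        return nil, err
    }
    return fixups.Apply(insns)
}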
// COREKind is the type of CO-RE relocation
type COREKind uint32
const (
reloFieldByteOffset COREKind = iota /* field byte offset */
reloFieldByteSize /* field size in bytes */
reloFieldExists /* field existence in target kernel */
reloFieldSigned /* field signedness (0 - unsigned, 1 - signed) */
reloFieldLShiftU64 /* bitfield-specific left bitshift */
reloFieldRShiftU64 /* bitfield-specific right bitshift */
reloTypeIDLocal /* type ID in local BPF object */
reloTypeIDTarget /* type ID in target kernel */
reloTypeExists /* type existence in target kernel */
reloTypeSize /* type size in bytes */
reloEnumvalExists /* enum value existence in target kernel */
reloEnumvalValue /* enum value integer value */
)
func (k COREKind) String() string {
switch k {
case reloFieldByteOffset:
return "byte_off"
case reloFieldByteSize:
return "byte_sz"
case reloFieldExists:
return "field_exists"
case reloFieldSigned:
return "signed"
case reloFieldLShiftU64:
return "lshift_u64"
case reloFieldRShiftU64:
return "rshift_u64"
case reloTypeIDLocal:
return "local_type_id"
case reloTypeIDTarget:
return "target_type_id"
case reloTypeExists:
return "type_exists"
case reloTypeSize:
return "type_size"
case reloEnumvalExists:
return "enumval_exists"
case reloEnumvalValue:
return "enumval_value"
default:
return "unknown"
}
}
func (k COREKind) checksForExistence() bool {
return k == reloEnumvalExists || k == reloTypeExists || k == reloFieldExists
}
func coreRelocate(local, target *Spec, relos coreRelos) (COREFixups, error) {
if local.byteOrder != target.byteOrder {
return nil, fmt.Errorf("can't relocate %s against %s", local.byteOrder, target.byteOrder)
}
var ids []TypeID
relosByID := make(map[TypeID]coreRelos)
result := make(COREFixups, len(relos))
for _, relo := range relos {
if relo.kind == reloTypeIDLocal {
// Filtering out reloTypeIDLocal here makes our lives a lot easier
// down the line, since it doesn't have a target at all.
if len(relo.accessor) > 1 || relo.accessor[0] != 0 {
return nil, fmt.Errorf("%s: unexpected accessor %v", relo.kind, relo.accessor)
}
result[uint64(relo.insnOff)] = COREFixup{
relo.kind,
uint32(relo.typeID),
uint32(relo.typeID),
false,
}
continue
}
relos, ok := relosByID[relo.typeID]
if !ok {
ids = append(ids, relo.typeID)
}
relosByID[relo.typeID] = append(relos, relo)
}
// Ensure we work on relocations in a deterministic order.
sort.Slice(ids, func(i, j int) bool {
return ids[i] < ids[j]
})
for _, id := range ids {
if int(id) >= len(local.types) {
return nil, fmt.Errorf("invalid type id %d", id)
}
localType := local.types[id]
named, ok := localType.(NamedType)
if !ok || named.TypeName() == "" {
return nil, fmt.Errorf("relocate unnamed or anonymous type %s: %w", localType, ErrNotSupported)
}
relos := relosByID[id]
targets := target.namedTypes[essentialName(named.TypeName())]
fixups, err := coreCalculateFixups(localType, targets, relos)
if err != nil {
return nil, fmt.Errorf("relocate %s: %w", localType, err)
}
for i, relo := range relos {
result[uint64(relo.insnOff)] = fixups[i]
}
}
return result, nil
}
var errAmbiguousRelocation = errors.New("ambiguous relocation")
var errImpossibleRelocation = errors.New("impossible relocation")
// coreCalculateFixups calculates the fixups for the given relocations using
// the "best" target.
//
// The best target is determined by scoring: the less poisoning we have to do
// the better the target is.
func coreCalculateFixups(local Type, targets []NamedType, relos coreRelos) ([]COREFixup, error) {
localID := local.ID()
local, err := copyType(local, skipQualifierAndTypedef)
if err != nil {
return nil, err
}
bestScore := len(relos)
var bestFixups []COREFixup
for i := range targets {
targetID := targets[i].ID()
target, err := copyType(targets[i], skipQualifierAndTypedef)
if err != nil {
return nil, err
}
score := 0 // lower is better
fixups := make([]COREFixup, 0, len(relos))
for _, relo := range relos {
fixup, err := coreCalculateFixup(local, localID, target, targetID, relo)
if err != nil {
return nil, fmt.Errorf("target %s: %w", target, err)
}
if fixup.Poison || fixup.isNonExistant() {
score++
}
fixups = append(fixups, fixup)
}
if score > bestScore {
// We have a better target already, ignore this one.
continue
}
if score < bestScore {
// This is the best target yet, use it.
bestScore = score
bestFixups = fixups
continue
}
// Some other target has the same score as the current one. Make sure
// the fixups agree with each other.
for i, fixup := range bestFixups {
if !fixup.equal(fixups[i]) {
return nil, fmt.Errorf("%s: multiple types match: %w", fixup.Kind, errAmbiguousRelocation)
}
}
}
if bestFixups == nil {
// Nothing at all matched, probably because there are no suitable
// targets at all. Poison everything!
bestFixups = make([]COREFixup, len(relos))
for i, relo := range relos {
bestFixups[i] = COREFixup{Kind: relo.kind, Poison: true}
}
}
return bestFixups, nil
}
// coreCalculateFixup calculates the fixup for a single local type, target type
// and relocation.
func coreCalculateFixup(local Type, localID TypeID, target Type, targetID TypeID, relo coreRelo) (COREFixup, error) {
fixup := func(local, target uint32) (COREFixup, error) {
return COREFixup{relo.kind, local, target, false}, nil
}
poison := func() (COREFixup, error) {
if relo.kind.checksForExistence() {
return fixup(1, 0)
}
return COREFixup{relo.kind, 0, 0, true}, nil
}
zero := COREFixup{}
switch relo.kind {
case reloTypeIDTarget, reloTypeSize, reloTypeExists:
if len(relo.accessor) > 1 || relo.accessor[0] != 0 {
return zero, fmt.Errorf("%s: unexpected accessor %v", relo.kind, relo.accessor)
}
err := coreAreTypesCompatible(local, target)
if errors.Is(err, errImpossibleRelocation) {
return poison()
}
if err != nil {
return zero, fmt.Errorf("relocation %s: %w", relo.kind, err)
}
switch relo.kind {
case reloTypeExists:
return fixup(1, 1)
case reloTypeIDTarget:
return fixup(uint32(localID), uint32(targetID))
case reloTypeSize:
localSize, err := Sizeof(local)
if err != nil {
return zero, err
}
targetSize, err := Sizeof(target)
if err != nil {
return zero, err
}
return fixup(uint32(localSize), uint32(targetSize))
}
case reloEnumvalValue, reloEnumvalExists:
localValue, targetValue, err := coreFindEnumValue(local, relo.accessor, target)
if errors.Is(err, errImpossibleRelocation) {
return poison()
}
if err != nil {
return zero, fmt.Errorf("relocation %s: %w", relo.kind, err)
}
switch relo.kind {
case reloEnumvalExists:
return fixup(1, 1)
case reloEnumvalValue:
return fixup(uint32(localValue.Value), uint32(targetValue.Value))
}
case reloFieldByteOffset, reloFieldByteSize, reloFieldExists:
if _, ok := target.(*Fwd); ok {
// We can't relocate fields using a forward declaration, so
// skip it. If a non-forward declaration is present in the BTF
// we'll find it in one of the other iterations.
return poison()
}
localField, targetField, err := coreFindField(local, relo.accessor, target)
if errors.Is(err, errImpossibleRelocation) {
return poison()
}
if err != nil {
return zero, fmt.Errorf("target %s: %w", target, err)
}
switch relo.kind {
case reloFieldExists:
return fixup(1, 1)
case reloFieldByteOffset:
return fixup(localField.offset/8, targetField.offset/8)
case reloFieldByteSize:
localSize, err := Sizeof(localField.Type)
if err != nil {
return zero, err
}
targetSize, err := Sizeof(targetField.Type)
if err != nil {
return zero, err
}
return fixup(uint32(localSize), uint32(targetSize))
}
}
return zero, fmt.Errorf("relocation %s: %w", relo.kind, ErrNotSupported)
}
/* coreAccessor contains a path through a struct. It contains at least one index.
*
* The interpretation depends on the kind of the relocation. The following is
* taken from struct bpf_core_relo in libbpf_internal.h:
*
* - for field-based relocations, string encodes an accessed field using
* a sequence of field and array indices, separated by colon (:). It's
* conceptually very close to LLVM's getelementptr ([0]) instruction's
* arguments for identifying offset to a field.
* - for type-based relocations, strings is expected to be just "0";
* - for enum value-based relocations, string contains an index of enum
* value within its enum type;
*
* Example to provide a better feel.
*
* struct sample {
* int a;
* struct {
* int b[10];
* };
* };
*
* struct sample s = ...;
* int x = &s->a; // encoded as "0:0" (a is field #0)
* int y = &s->b[5]; // encoded as "0:1:0:5" (anon struct is field #1,
* // b is field #0 inside anon struct, accessing elem #5)
* int z = &s[10]->b; // encoded as "10:1" (ptr is used as an array)
*/
type coreAccessor []int
func parseCoreAccessor(accessor string) (coreAccessor, error) {
if accessor == "" {
return nil, fmt.Errorf("empty accessor")
}
parts := strings.Split(accessor, ":")
result := make(coreAccessor, 0, len(parts))
for _, part := range parts {
// 31 bits to avoid overflowing int on 32 bit platforms.
index, err := strconv.ParseUint(part, 10, 31)
if err != nil {
return nil, fmt.Errorf("accessor index %q: %s", part, err)
}
result = append(result, int(index))
}
return result, nil
}
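// exampleParseAccessor is an illustrative sketch (hypothetical helper, not part
// of the package API). The accessor "0:1:0:5" from the comment above parses
// into one index per step of the field path.
func exampleParseAccessor() {
    acc, err := parseCoreAccessor("0:1:0:5")
    _ = err == nil && len(acc) == 4 && acc[3] == 5 // [0 1 0 5], matching &s->b[5]
}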
func (ca coreAccessor) String() string {
strs := make([]string, 0, len(ca))
for _, i := range ca {
strs = append(strs, strconv.Itoa(i))
}
return strings.Join(strs, ":")
}
func (ca coreAccessor) enumValue(t Type) (*EnumValue, error) {
e, ok := t.(*Enum)
if !ok {
return nil, fmt.Errorf("not an enum: %s", t)
}
if len(ca) > 1 {
return nil, fmt.Errorf("invalid accessor %s for enum", ca)
}
i := ca[0]
if i >= len(e.Values) {
return nil, fmt.Errorf("invalid index %d for %s", i, e)
}
return &e.Values[i], nil
}
type coreField struct {
Type Type
offset uint32
}
func adjustOffset(base uint32, t Type, n int) (uint32, error) {
size, err := Sizeof(t)
if err != nil {
return 0, err
}
return base + (uint32(n) * uint32(size) * 8), nil
}
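// exampleAdjustOffset is an illustrative sketch (hypothetical helper, not part
// of the package API). Indexing the third element of an array of 4-byte
// integers advances the bit offset by 3 * 4 * 8 = 96 bits; the Int literal is
// a made-up local type.
func exampleAdjustOffset() {
    u32 := &Int{Size: 4}
    off, err := adjustOffset(0, u32, 3)
    _ = err == nil && off == 96
}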
// coreFindField descends into the local type using the accessor and tries to
// find an equivalent field in target at each step.
//
// Returns the field and the offset of the field from the start of
// target in bits.
func coreFindField(local Type, localAcc coreAccessor, target Type) (_, _ coreField, _ error) {
// The first index is used to offset a pointer of the base type like
// when accessing an array.
localOffset, err := adjustOffset(0, local, localAcc[0])
if err != nil {
return coreField{}, coreField{}, err
}
targetOffset, err := adjustOffset(0, target, localAcc[0])
if err != nil {
return coreField{}, coreField{}, err
}
if err := coreAreMembersCompatible(local, target); err != nil {
return coreField{}, coreField{}, fmt.Errorf("fields: %w", err)
}
var localMaybeFlex, targetMaybeFlex bool
for _, acc := range localAcc[1:] {
switch localType := local.(type) {
case composite:
// For composite types acc is used to find the field in the local type,
// and then we try to find a field in target with the same name.
localMembers := localType.members()
if acc >= len(localMembers) {
return coreField{}, coreField{}, fmt.Errorf("invalid accessor %d for %s", acc, local)
}
localMember := localMembers[acc]
if localMember.Name == "" {
_, ok := localMember.Type.(composite)
if !ok {
return coreField{}, coreField{}, fmt.Errorf("unnamed field with type %s: %s", localMember.Type, ErrNotSupported)
}
// This is an anonymous struct or union, ignore it.
local = localMember.Type
localOffset += localMember.OffsetBits
localMaybeFlex = false
continue
}
targetType, ok := target.(composite)
if !ok {
return coreField{}, coreField{}, fmt.Errorf("target not composite: %w", errImpossibleRelocation)
}
targetMember, last, err := coreFindMember(targetType, localMember.Name)
if err != nil {
return coreField{}, coreField{}, err
}
if targetMember.BitfieldSize > 0 {
return coreField{}, coreField{}, fmt.Errorf("field %q is a bitfield: %w", targetMember.Name, ErrNotSupported)
}
local = localMember.Type
localMaybeFlex = acc == len(localMembers)-1
localOffset += localMember.OffsetBits
target = targetMember.Type
targetMaybeFlex = last
targetOffset += targetMember.OffsetBits
case *Array:
// For arrays, acc is the index in the target.
targetType, ok := target.(*Array)
if !ok {
return coreField{}, coreField{}, fmt.Errorf("target not array: %w", errImpossibleRelocation)
}
if localType.Nelems == 0 && !localMaybeFlex {
return coreField{}, coreField{}, fmt.Errorf("local type has invalid flexible array")
}
if targetType.Nelems == 0 && !targetMaybeFlex {
return coreField{}, coreField{}, fmt.Errorf("target type has invalid flexible array")
}
if localType.Nelems > 0 && acc >= int(localType.Nelems) {
return coreField{}, coreField{}, fmt.Errorf("invalid access of %s at index %d", localType, acc)
}
if targetType.Nelems > 0 && acc >= int(targetType.Nelems) {
return coreField{}, coreField{}, fmt.Errorf("out of bounds access of target: %w", errImpossibleRelocation)
}
local = localType.Type
localMaybeFlex = false
localOffset, err = adjustOffset(localOffset, local, acc)
if err != nil {
return coreField{}, coreField{}, err
}
target = targetType.Type
targetMaybeFlex = false
targetOffset, err = adjustOffset(targetOffset, target, acc)
if err != nil {
return coreField{}, coreField{}, err
}
default:
return coreField{}, coreField{}, fmt.Errorf("relocate field of %T: %w", localType, ErrNotSupported)
}
if err := coreAreMembersCompatible(local, target); err != nil {
return coreField{}, coreField{}, err
}
}
return coreField{local, localOffset}, coreField{target, targetOffset}, nil
}
// coreFindMember finds a member in a composite type while handling anonymous
// structs and unions.
func coreFindMember(typ composite, name string) (Member, bool, error) {
if name == "" {
return Member{}, false, errors.New("can't search for anonymous member")
}
type offsetTarget struct {
composite
offset uint32
}
targets := []offsetTarget{{typ, 0}}
visited := make(map[composite]bool)
for i := 0; i < len(targets); i++ {
target := targets[i]
// Only visit targets once to prevent infinite recursion.
if visited[target] {
continue
}
if len(visited) >= maxTypeDepth {
// This check is different than libbpf, which restricts the entire
// path to BPF_CORE_SPEC_MAX_LEN items.
return Member{}, false, fmt.Errorf("type is nested too deep")
}
visited[target] = true
members := target.members()
for j, member := range members {
if member.Name == name {
// NB: This is safe because member is a copy.
member.OffsetBits += target.offset
return member, j == len(members)-1, nil
}
// The names don't match, but this member could be an anonymous struct
// or union.
if member.Name != "" {
continue
}
comp, ok := member.Type.(composite)
if !ok {
return Member{}, false, fmt.Errorf("anonymous non-composite type %T not allowed", member.Type)
}
targets = append(targets, offsetTarget{comp, target.offset + member.OffsetBits})
}
}
return Member{}, false, fmt.Errorf("no matching member: %w", errImpossibleRelocation)
}
// coreFindEnumValue follows localAcc to find the equivalent enum value in target.
func coreFindEnumValue(local Type, localAcc coreAccessor, target Type) (localValue, targetValue *EnumValue, _ error) {
localValue, err := localAcc.enumValue(local)
if err != nil {
return nil, nil, err
}
targetEnum, ok := target.(*Enum)
if !ok {
return nil, nil, errImpossibleRelocation
}
localName := essentialName(localValue.Name)
for i, targetValue := range targetEnum.Values {
if essentialName(targetValue.Name) != localName {
continue
}
return localValue, &targetEnum.Values[i], nil
}
return nil, nil, errImpossibleRelocation
}
/* The comment below is from bpf_core_types_are_compat in libbpf.c:
*
* Check local and target types for compatibility. This check is used for
* type-based CO-RE relocations and follow slightly different rules than
* field-based relocations. This function assumes that root types were already
* checked for name match. Beyond that initial root-level name check, names
* are completely ignored. Compatibility rules are as follows:
* - any two STRUCTs/UNIONs/FWDs/ENUMs/INTs are considered compatible, but
* kind should match for local and target types (i.e., STRUCT is not
* compatible with UNION);
* - for ENUMs, the size is ignored;
* - for INT, size and signedness are ignored;
* - for ARRAY, dimensionality is ignored, element types are checked for
* compatibility recursively;
* - CONST/VOLATILE/RESTRICT modifiers are ignored;
* - TYPEDEFs/PTRs are compatible if types they pointing to are compatible;
* - FUNC_PROTOs are compatible if they have compatible signature: same
* number of input args and compatible return and argument types.
* These rules are not set in stone and probably will be adjusted as we get
* more experience with using BPF CO-RE relocations.
*
* Returns errImpossibleRelocation if types are not compatible.
*/
func coreAreTypesCompatible(localType Type, targetType Type) error {
var (
localTs, targetTs typeDeque
l, t = &localType, &targetType
depth = 0
)
for ; l != nil && t != nil; l, t = localTs.shift(), targetTs.shift() {
if depth >= maxTypeDepth {
return errors.New("types are nested too deep")
}
localType = *l
targetType = *t
if reflect.TypeOf(localType) != reflect.TypeOf(targetType) {
return fmt.Errorf("type mismatch: %w", errImpossibleRelocation)
}
switch lv := (localType).(type) {
case *Void, *Struct, *Union, *Enum, *Fwd:
// Nothing to do here
case *Int:
tv := targetType.(*Int)
if lv.isBitfield() || tv.isBitfield() {
return fmt.Errorf("bitfield: %w", errImpossibleRelocation)
}
case *Pointer, *Array:
depth++
localType.walk(&localTs)
targetType.walk(&targetTs)
case *FuncProto:
tv := targetType.(*FuncProto)
if len(lv.Params) != len(tv.Params) {
return fmt.Errorf("function param mismatch: %w", errImpossibleRelocation)
}
depth++
localType.walk(&localTs)
targetType.walk(&targetTs)
default:
return fmt.Errorf("unsupported type %T", localType)
}
}
if l != nil {
return fmt.Errorf("dangling local type %T", *l)
}
if t != nil {
return fmt.Errorf("dangling target type %T", *t)
}
return nil
}
/* coreAreMembersCompatible checks two types for field-based relocation compatibility.
*
* The comment below is from bpf_core_fields_are_compat in libbpf.c:
*
* Check two types for compatibility for the purpose of field access
* relocation. const/volatile/restrict and typedefs are skipped to ensure we
* are relocating semantically compatible entities:
* - any two STRUCTs/UNIONs are compatible and can be mixed;
* - any two FWDs are compatible, if their names match (modulo flavor suffix);
* - any two PTRs are always compatible;
* - for ENUMs, names should be the same (ignoring flavor suffix) or at
* least one of enums should be anonymous;
* - for ENUMs, check sizes, names are ignored;
* - for INT, size and signedness are ignored;
* - any two FLOATs are always compatible;
* - for ARRAY, dimensionality is ignored, element types are checked for
* compatibility recursively;
* [ NB: coreAreMembersCompatible doesn't recurse, this check is done
* by coreFindField. ]
* - everything else shouldn't be ever a target of relocation.
* These rules are not set in stone and probably will be adjusted as we get
* more experience with using BPF CO-RE relocations.
*
* Returns errImpossibleRelocation if the members are not compatible.
*/
func coreAreMembersCompatible(localType Type, targetType Type) error {
doNamesMatch := func(a, b string) error {
if a == "" || b == "" {
// allow anonymous and named type to match
return nil
}
if essentialName(a) == essentialName(b) {
return nil
}
return fmt.Errorf("names don't match: %w", errImpossibleRelocation)
}
_, lok := localType.(composite)
_, tok := targetType.(composite)
if lok && tok {
return nil
}
if reflect.TypeOf(localType) != reflect.TypeOf(targetType) {
return fmt.Errorf("type mismatch: %w", errImpossibleRelocation)
}
switch lv := localType.(type) {
case *Array, *Pointer, *Float:
return nil
case *Enum:
tv := targetType.(*Enum)
return doNamesMatch(lv.Name, tv.Name)
case *Fwd:
tv := targetType.(*Fwd)
return doNamesMatch(lv.Name, tv.Name)
case *Int:
tv := targetType.(*Int)
if lv.isBitfield() || tv.isBitfield() {
return fmt.Errorf("bitfield: %w", errImpossibleRelocation)
}
return nil
default:
return fmt.Errorf("type %s: %w", localType, ErrNotSupported)
}
}
func skipQualifierAndTypedef(typ Type) (Type, error) {
result := typ
for depth := 0; depth <= maxTypeDepth; depth++ {
switch v := (result).(type) {
case qualifier:
result = v.qualify()
case *Typedef:
result = v.Type
default:
return result, nil
}
}
return nil, errors.New("exceeded type depth")
}

8
vendor/github.com/cilium/ebpf/internal/btf/doc.go generated vendored Normal file

@ -0,0 +1,8 @@
// Package btf handles data encoded according to the BPF Type Format.
//
// The canonical documentation lives in the Linux kernel repository and is
// available at https://www.kernel.org/doc/html/latest/bpf/btf.html
//
// The API is very much unstable. You should only use this via the main
// ebpf library.
package btf

312
vendor/github.com/cilium/ebpf/internal/btf/ext_info.go generated vendored Normal file

@ -0,0 +1,312 @@
package btf
import (
"bufio"
"bytes"
"encoding/binary"
"errors"
"fmt"
"io"
"github.com/cilium/ebpf/asm"
"github.com/cilium/ebpf/internal"
)
type btfExtHeader struct {
Magic uint16
Version uint8
Flags uint8
HdrLen uint32
FuncInfoOff uint32
FuncInfoLen uint32
LineInfoOff uint32
LineInfoLen uint32
}
type btfExtCoreHeader struct {
CoreReloOff uint32
CoreReloLen uint32
}
func parseExtInfos(r io.ReadSeeker, bo binary.ByteOrder, strings stringTable) (funcInfo, lineInfo map[string]extInfo, relos map[string]coreRelos, err error) {
var header btfExtHeader
var coreHeader btfExtCoreHeader
if err := binary.Read(r, bo, &header); err != nil {
return nil, nil, nil, fmt.Errorf("can't read header: %v", err)
}
if header.Magic != btfMagic {
return nil, nil, nil, fmt.Errorf("incorrect magic value %v", header.Magic)
}
if header.Version != 1 {
return nil, nil, nil, fmt.Errorf("unexpected version %v", header.Version)
}
if header.Flags != 0 {
return nil, nil, nil, fmt.Errorf("unsupported flags %v", header.Flags)
}
remainder := int64(header.HdrLen) - int64(binary.Size(&header))
if remainder < 0 {
return nil, nil, nil, errors.New("header is too short")
}
coreHdrSize := int64(binary.Size(&coreHeader))
if remainder >= coreHdrSize {
if err := binary.Read(r, bo, &coreHeader); err != nil {
return nil, nil, nil, fmt.Errorf("can't read CO-RE relocation header: %v", err)
}
remainder -= coreHdrSize
}
// Of course, the .BTF.ext header has different semantics than the
// .BTF ext header. We need to ignore non-null values.
_, err = io.CopyN(io.Discard, r, remainder)
if err != nil {
return nil, nil, nil, fmt.Errorf("header padding: %v", err)
}
if _, err := r.Seek(int64(header.HdrLen+header.FuncInfoOff), io.SeekStart); err != nil {
return nil, nil, nil, fmt.Errorf("can't seek to function info section: %v", err)
}
buf := bufio.NewReader(io.LimitReader(r, int64(header.FuncInfoLen)))
funcInfo, err = parseExtInfo(buf, bo, strings)
if err != nil {
return nil, nil, nil, fmt.Errorf("function info: %w", err)
}
if _, err := r.Seek(int64(header.HdrLen+header.LineInfoOff), io.SeekStart); err != nil {
return nil, nil, nil, fmt.Errorf("can't seek to line info section: %v", err)
}
buf = bufio.NewReader(io.LimitReader(r, int64(header.LineInfoLen)))
lineInfo, err = parseExtInfo(buf, bo, strings)
if err != nil {
return nil, nil, nil, fmt.Errorf("line info: %w", err)
}
if coreHeader.CoreReloOff > 0 && coreHeader.CoreReloLen > 0 {
if _, err := r.Seek(int64(header.HdrLen+coreHeader.CoreReloOff), io.SeekStart); err != nil {
return nil, nil, nil, fmt.Errorf("can't seek to CO-RE relocation section: %v", err)
}
relos, err = parseExtInfoRelos(io.LimitReader(r, int64(coreHeader.CoreReloLen)), bo, strings)
if err != nil {
return nil, nil, nil, fmt.Errorf("CO-RE relocation info: %w", err)
}
}
return funcInfo, lineInfo, relos, nil
}
type btfExtInfoSec struct {
SecNameOff uint32
NumInfo uint32
}
type extInfoRecord struct {
InsnOff uint64
Opaque []byte
}
type extInfo struct {
byteOrder binary.ByteOrder
recordSize uint32
records []extInfoRecord
}
func (ei extInfo) append(other extInfo, offset uint64) (extInfo, error) {
if other.byteOrder != ei.byteOrder {
return extInfo{}, fmt.Errorf("ext_info byte order mismatch, want %v (got %v)", ei.byteOrder, other.byteOrder)
}
if other.recordSize != ei.recordSize {
return extInfo{}, fmt.Errorf("ext_info record size mismatch, want %d (got %d)", ei.recordSize, other.recordSize)
}
records := make([]extInfoRecord, 0, len(ei.records)+len(other.records))
records = append(records, ei.records...)
for _, info := range other.records {
records = append(records, extInfoRecord{
InsnOff: info.InsnOff + offset,
Opaque: info.Opaque,
})
}
return extInfo{ei.byteOrder, ei.recordSize, records}, nil
}
func (ei extInfo) MarshalBinary() ([]byte, error) {
if ei.byteOrder != internal.NativeEndian {
return nil, fmt.Errorf("%s is not the native byte order", ei.byteOrder)
}
if len(ei.records) == 0 {
return nil, nil
}
buf := bytes.NewBuffer(make([]byte, 0, int(ei.recordSize)*len(ei.records)))
for _, info := range ei.records {
// The kernel expects offsets in number of raw bpf instructions,
// while the ELF tracks it in bytes.
insnOff := uint32(info.InsnOff / asm.InstructionSize)
if err := binary.Write(buf, internal.NativeEndian, insnOff); err != nil {
return nil, fmt.Errorf("can't write instruction offset: %v", err)
}
buf.Write(info.Opaque)
}
return buf.Bytes(), nil
}
func parseExtInfo(r io.Reader, bo binary.ByteOrder, strings stringTable) (map[string]extInfo, error) {
const maxRecordSize = 256
var recordSize uint32
if err := binary.Read(r, bo, &recordSize); err != nil {
return nil, fmt.Errorf("can't read record size: %v", err)
}
if recordSize < 4 {
// Need at least insnOff
return nil, errors.New("record size too short")
}
if recordSize > maxRecordSize {
return nil, fmt.Errorf("record size %v exceeds %v", recordSize, maxRecordSize)
}
result := make(map[string]extInfo)
for {
secName, infoHeader, err := parseExtInfoHeader(r, bo, strings)
if errors.Is(err, io.EOF) {
return result, nil
}
var records []extInfoRecord
for i := uint32(0); i < infoHeader.NumInfo; i++ {
var byteOff uint32
if err := binary.Read(r, bo, &byteOff); err != nil {
return nil, fmt.Errorf("section %v: can't read extended info offset: %v", secName, err)
}
buf := make([]byte, int(recordSize-4))
if _, err := io.ReadFull(r, buf); err != nil {
return nil, fmt.Errorf("section %v: can't read record: %v", secName, err)
}
if byteOff%asm.InstructionSize != 0 {
return nil, fmt.Errorf("section %v: offset %v is not aligned with instruction size", secName, byteOff)
}
records = append(records, extInfoRecord{uint64(byteOff), buf})
}
result[secName] = extInfo{
bo,
recordSize,
records,
}
}
}
// bpfCoreRelo matches `struct bpf_core_relo` from the kernel
type bpfCoreRelo struct {
InsnOff uint32
TypeID TypeID
AccessStrOff uint32
Kind COREKind
}
type coreRelo struct {
insnOff uint32
typeID TypeID
accessor coreAccessor
kind COREKind
}
type coreRelos []coreRelo
// append two slices of coreRelo to each other. The insnOff of other is adjusted
// by offset.
func (r coreRelos) append(other coreRelos, offset uint64) coreRelos {
result := make([]coreRelo, 0, len(r)+len(other))
result = append(result, r...)
for _, relo := range other {
relo.insnOff += uint32(offset)
result = append(result, relo)
}
return result
}
var extInfoReloSize = binary.Size(bpfCoreRelo{})
func parseExtInfoRelos(r io.Reader, bo binary.ByteOrder, strings stringTable) (map[string]coreRelos, error) {
var recordSize uint32
if err := binary.Read(r, bo, &recordSize); err != nil {
return nil, fmt.Errorf("read record size: %v", err)
}
if recordSize != uint32(extInfoReloSize) {
return nil, fmt.Errorf("expected record size %d, got %d", extInfoReloSize, recordSize)
}
result := make(map[string]coreRelos)
for {
secName, infoHeader, err := parseExtInfoHeader(r, bo, strings)
if errors.Is(err, io.EOF) {
return result, nil
}
var relos coreRelos
for i := uint32(0); i < infoHeader.NumInfo; i++ {
var relo bpfCoreRelo
if err := binary.Read(r, bo, &relo); err != nil {
return nil, fmt.Errorf("section %v: read record: %v", secName, err)
}
if relo.InsnOff%asm.InstructionSize != 0 {
return nil, fmt.Errorf("section %v: offset %v is not aligned with instruction size", secName, relo.InsnOff)
}
accessorStr, err := strings.Lookup(relo.AccessStrOff)
if err != nil {
return nil, err
}
accessor, err := parseCoreAccessor(accessorStr)
if err != nil {
return nil, fmt.Errorf("accessor %q: %s", accessorStr, err)
}
relos = append(relos, coreRelo{
relo.InsnOff,
relo.TypeID,
accessor,
relo.Kind,
})
}
result[secName] = relos
}
}
func parseExtInfoHeader(r io.Reader, bo binary.ByteOrder, strings stringTable) (string, *btfExtInfoSec, error) {
var infoHeader btfExtInfoSec
if err := binary.Read(r, bo, &infoHeader); err != nil {
return "", nil, fmt.Errorf("read ext info header: %w", err)
}
secName, err := strings.Lookup(infoHeader.SecNameOff)
if err != nil {
return "", nil, fmt.Errorf("get section name: %w", err)
}
if infoHeader.NumInfo == 0 {
return "", nil, fmt.Errorf("section %s has zero records", secName)
}
return secName, &infoHeader, nil
}

50
vendor/github.com/cilium/ebpf/internal/btf/fuzz.go generated vendored Normal file

@ -0,0 +1,50 @@
//go:build gofuzz
// +build gofuzz
// Use with https://github.com/dvyukov/go-fuzz
package btf
import (
"bytes"
"encoding/binary"
"github.com/cilium/ebpf/internal"
)
func FuzzSpec(data []byte) int {
if len(data) < binary.Size(btfHeader{}) {
return -1
}
spec, err := loadNakedSpec(bytes.NewReader(data), internal.NativeEndian, nil, nil)
if err != nil {
if spec != nil {
panic("spec is not nil")
}
return 0
}
if spec == nil {
panic("spec is nil")
}
return 1
}
func FuzzExtInfo(data []byte) int {
if len(data) < binary.Size(btfExtHeader{}) {
return -1
}
table := stringTable("\x00foo\x00barfoo\x00")
info, err := parseExtInfo(bytes.NewReader(data), internal.NativeEndian, table)
if err != nil {
if info != nil {
panic("info is not nil")
}
return 0
}
if info == nil {
panic("info is nil")
}
return 1
}

48
vendor/github.com/cilium/ebpf/internal/btf/info.go generated vendored Normal file

@ -0,0 +1,48 @@
package btf
import (
"bytes"
"github.com/cilium/ebpf/internal"
)
// info describes a BTF object.
type info struct {
BTF *Spec
ID ID
// Name is an identifying name for the BTF, currently only used by the
// kernel.
Name string
// KernelBTF is true if the BTF originated with the kernel and not
// userspace.
KernelBTF bool
}
func newInfoFromFd(fd *internal.FD) (*info, error) {
// We invoke the syscall once with empty BTF and name buffers to get size
// information to allocate buffers. Then we invoke it a second time with
// buffers to receive the data.
bpfInfo, err := bpfGetBTFInfoByFD(fd, nil, nil)
if err != nil {
return nil, err
}
btfBuffer := make([]byte, bpfInfo.btfSize)
nameBuffer := make([]byte, bpfInfo.nameLen)
bpfInfo, err = bpfGetBTFInfoByFD(fd, btfBuffer, nameBuffer)
if err != nil {
return nil, err
}
spec, err := loadRawSpec(bytes.NewReader(btfBuffer), internal.NativeEndian, nil, nil)
if err != nil {
return nil, err
}
return &info{
BTF: spec,
ID: ID(bpfInfo.id),
Name: internal.CString(nameBuffer),
KernelBTF: bpfInfo.kernelBTF != 0,
}, nil
}

54
vendor/github.com/cilium/ebpf/internal/btf/strings.go generated vendored Normal file

@ -0,0 +1,54 @@
package btf
import (
"bytes"
"errors"
"fmt"
"io"
)
type stringTable []byte
func readStringTable(r io.Reader) (stringTable, error) {
contents, err := io.ReadAll(r)
if err != nil {
return nil, fmt.Errorf("can't read string table: %v", err)
}
if len(contents) < 1 {
return nil, errors.New("string table is empty")
}
if contents[0] != '\x00' {
return nil, errors.New("first item in string table is non-empty")
}
if contents[len(contents)-1] != '\x00' {
return nil, errors.New("string table isn't null terminated")
}
return stringTable(contents), nil
}
func (st stringTable) Lookup(offset uint32) (string, error) {
if int64(offset) > int64(^uint(0)>>1) {
return "", fmt.Errorf("offset %d overflows int", offset)
}
pos := int(offset)
if pos >= len(st) {
return "", fmt.Errorf("offset %d is out of bounds", offset)
}
if pos > 0 && st[pos-1] != '\x00' {
return "", fmt.Errorf("offset %d isn't start of a string", offset)
}
str := st[pos:]
end := bytes.IndexByte(str, '\x00')
if end == -1 {
return "", fmt.Errorf("offset %d isn't null terminated", offset)
}
return string(str[:end]), nil
}
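// exampleStringLookup is an illustrative sketch (hypothetical helper, not part
// of the package API). A valid table starts and ends with a NUL byte, and
// offsets must point at the first byte of a string.
func exampleStringLookup() {
    st := stringTable("\x00foo\x00bar\x00")
    s, err := st.Lookup(1)
    _ = err == nil && s == "foo" // offset 5 would yield "bar", offset 2 is an error
}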

31
vendor/github.com/cilium/ebpf/internal/btf/syscalls.go generated vendored Normal file

@ -0,0 +1,31 @@
package btf
import (
"fmt"
"unsafe"
"github.com/cilium/ebpf/internal"
)
type bpfBTFInfo struct {
btf internal.Pointer
btfSize uint32
id uint32
name internal.Pointer
nameLen uint32
kernelBTF uint32
}
func bpfGetBTFInfoByFD(fd *internal.FD, btf, name []byte) (*bpfBTFInfo, error) {
info := bpfBTFInfo{
btf: internal.NewSlicePointer(btf),
btfSize: uint32(len(btf)),
name: internal.NewSlicePointer(name),
nameLen: uint32(len(name)),
}
if err := internal.BPFObjGetInfoByFD(fd, unsafe.Pointer(&info), unsafe.Sizeof(info)); err != nil {
return nil, fmt.Errorf("can't get program info: %w", err)
}
return &info, nil
}

957
vendor/github.com/cilium/ebpf/internal/btf/types.go generated vendored Normal file

@ -0,0 +1,957 @@
package btf
import (
"fmt"
"math"
"strings"
)
const maxTypeDepth = 32
// TypeID identifies a type in a BTF section.
type TypeID uint32
// ID implements part of the Type interface.
func (tid TypeID) ID() TypeID {
return tid
}
// Type represents a type described by BTF.
type Type interface {
ID() TypeID
String() string
// Make a copy of the type, without copying Type members.
copy() Type
// Enumerate all nested Types. Repeated calls must visit nested
// types in the same order.
walk(*typeDeque)
}
// NamedType is a type with a name.
type NamedType interface {
Type
// Name of the type, empty for anonymous types.
TypeName() string
}
var (
_ NamedType = (*Int)(nil)
_ NamedType = (*Struct)(nil)
_ NamedType = (*Union)(nil)
_ NamedType = (*Enum)(nil)
_ NamedType = (*Fwd)(nil)
_ NamedType = (*Func)(nil)
_ NamedType = (*Typedef)(nil)
_ NamedType = (*Var)(nil)
_ NamedType = (*Datasec)(nil)
_ NamedType = (*Float)(nil)
)
// Void is the unit type of BTF.
type Void struct{}
func (v *Void) ID() TypeID { return 0 }
func (v *Void) String() string { return "void#0" }
func (v *Void) size() uint32 { return 0 }
func (v *Void) copy() Type { return (*Void)(nil) }
func (v *Void) walk(*typeDeque) {}
type IntEncoding byte
const (
Signed IntEncoding = 1 << iota
Char
Bool
)
// Int is an integer of a given length.
type Int struct {
TypeID
Name string
// The size of the integer in bytes.
Size uint32
Encoding IntEncoding
// OffsetBits is the starting bit offset. Currently always 0.
// See https://www.kernel.org/doc/html/latest/bpf/btf.html#btf-kind-int
OffsetBits uint32
Bits byte
}
func (i *Int) String() string {
var s strings.Builder
switch {
case i.Encoding&Char != 0:
s.WriteString("char")
case i.Encoding&Bool != 0:
s.WriteString("bool")
default:
if i.Encoding&Signed == 0 {
s.WriteRune('u')
}
s.WriteString("int")
fmt.Fprintf(&s, "%d", i.Size*8)
}
fmt.Fprintf(&s, "#%d", i.TypeID)
if i.Bits > 0 {
fmt.Fprintf(&s, "[bits=%d]", i.Bits)
}
return s.String()
}
func (i *Int) TypeName() string { return i.Name }
func (i *Int) size() uint32 { return i.Size }
func (i *Int) walk(*typeDeque) {}
func (i *Int) copy() Type {
cpy := *i
return &cpy
}
func (i *Int) isBitfield() bool {
return i.OffsetBits > 0
}
// Pointer is a pointer to another type.
type Pointer struct {
TypeID
Target Type
}
func (p *Pointer) String() string {
return fmt.Sprintf("pointer#%d[target=#%d]", p.TypeID, p.Target.ID())
}
func (p *Pointer) size() uint32 { return 8 }
func (p *Pointer) walk(tdq *typeDeque) { tdq.push(&p.Target) }
func (p *Pointer) copy() Type {
cpy := *p
return &cpy
}
// Array is an array with a fixed number of elements.
type Array struct {
TypeID
Type Type
Nelems uint32
}
func (arr *Array) String() string {
return fmt.Sprintf("array#%d[type=#%d n=%d]", arr.TypeID, arr.Type.ID(), arr.Nelems)
}
func (arr *Array) walk(tdq *typeDeque) { tdq.push(&arr.Type) }
func (arr *Array) copy() Type {
cpy := *arr
return &cpy
}
// Struct is a compound type of consecutive members.
type Struct struct {
TypeID
Name string
// The size of the struct including padding, in bytes
Size uint32
Members []Member
}
func (s *Struct) String() string {
return fmt.Sprintf("struct#%d[%q]", s.TypeID, s.Name)
}
func (s *Struct) TypeName() string { return s.Name }
func (s *Struct) size() uint32 { return s.Size }
func (s *Struct) walk(tdq *typeDeque) {
for i := range s.Members {
tdq.push(&s.Members[i].Type)
}
}
func (s *Struct) copy() Type {
cpy := *s
cpy.Members = copyMembers(s.Members)
return &cpy
}
func (s *Struct) members() []Member {
return s.Members
}
// Union is a compound type where members occupy the same memory.
type Union struct {
TypeID
Name string
// The size of the union including padding, in bytes.
Size uint32
Members []Member
}
func (u *Union) String() string {
return fmt.Sprintf("union#%d[%q]", u.TypeID, u.Name)
}
func (u *Union) TypeName() string { return u.Name }
func (u *Union) size() uint32 { return u.Size }
func (u *Union) walk(tdq *typeDeque) {
for i := range u.Members {
tdq.push(&u.Members[i].Type)
}
}
func (u *Union) copy() Type {
cpy := *u
cpy.Members = copyMembers(u.Members)
return &cpy
}
func (u *Union) members() []Member {
return u.Members
}
func copyMembers(orig []Member) []Member {
cpy := make([]Member, len(orig))
copy(cpy, orig)
return cpy
}
type composite interface {
members() []Member
}
var (
_ composite = (*Struct)(nil)
_ composite = (*Union)(nil)
)
// Member is part of a Struct or Union.
//
// It is not a valid Type.
type Member struct {
Name string
Type Type
// OffsetBits is the bit offset of this member.
OffsetBits uint32
BitfieldSize uint32
}
// Enum lists possible values.
type Enum struct {
TypeID
Name string
Values []EnumValue
}
func (e *Enum) String() string {
return fmt.Sprintf("enum#%d[%q]", e.TypeID, e.Name)
}
func (e *Enum) TypeName() string { return e.Name }
// EnumValue is part of an Enum.
//
// It is not a valid Type.
type EnumValue struct {
Name string
Value int32
}
func (e *Enum) size() uint32 { return 4 }
func (e *Enum) walk(*typeDeque) {}
func (e *Enum) copy() Type {
cpy := *e
cpy.Values = make([]EnumValue, len(e.Values))
copy(cpy.Values, e.Values)
return &cpy
}
// FwdKind is the type of forward declaration.
type FwdKind int
// Valid types of forward declaration.
const (
FwdStruct FwdKind = iota
FwdUnion
)
func (fk FwdKind) String() string {
switch fk {
case FwdStruct:
return "struct"
case FwdUnion:
return "union"
default:
return fmt.Sprintf("%T(%d)", fk, int(fk))
}
}
// Fwd is a forward declaration of a Type.
type Fwd struct {
TypeID
Name string
Kind FwdKind
}
func (f *Fwd) String() string {
return fmt.Sprintf("fwd#%d[%s %q]", f.TypeID, f.Kind, f.Name)
}
func (f *Fwd) TypeName() string { return f.Name }
func (f *Fwd) walk(*typeDeque) {}
func (f *Fwd) copy() Type {
cpy := *f
return &cpy
}
// Typedef is an alias of a Type.
type Typedef struct {
TypeID
Name string
Type Type
}
func (td *Typedef) String() string {
return fmt.Sprintf("typedef#%d[%q #%d]", td.TypeID, td.Name, td.Type.ID())
}
func (td *Typedef) TypeName() string { return td.Name }
func (td *Typedef) walk(tdq *typeDeque) { tdq.push(&td.Type) }
func (td *Typedef) copy() Type {
cpy := *td
return &cpy
}
// Volatile is a qualifier.
type Volatile struct {
TypeID
Type Type
}
func (v *Volatile) String() string {
return fmt.Sprintf("volatile#%d[#%d]", v.TypeID, v.Type.ID())
}
func (v *Volatile) qualify() Type { return v.Type }
func (v *Volatile) walk(tdq *typeDeque) { tdq.push(&v.Type) }
func (v *Volatile) copy() Type {
cpy := *v
return &cpy
}
// Const is a qualifier.
type Const struct {
TypeID
Type Type
}
func (c *Const) String() string {
return fmt.Sprintf("const#%d[#%d]", c.TypeID, c.Type.ID())
}
func (c *Const) qualify() Type { return c.Type }
func (c *Const) walk(tdq *typeDeque) { tdq.push(&c.Type) }
func (c *Const) copy() Type {
cpy := *c
return &cpy
}
// Restrict is a qualifier.
type Restrict struct {
TypeID
Type Type
}
func (r *Restrict) String() string {
return fmt.Sprintf("restrict#%d[#%d]", r.TypeID, r.Type.ID())
}
func (r *Restrict) qualify() Type { return r.Type }
func (r *Restrict) walk(tdq *typeDeque) { tdq.push(&r.Type) }
func (r *Restrict) copy() Type {
cpy := *r
return &cpy
}
// Func is a function definition.
type Func struct {
TypeID
Name string
Type Type
Linkage FuncLinkage
}
func (f *Func) String() string {
return fmt.Sprintf("func#%d[%s %q proto=#%d]", f.TypeID, f.Linkage, f.Name, f.Type.ID())
}
func (f *Func) TypeName() string { return f.Name }
func (f *Func) walk(tdq *typeDeque) { tdq.push(&f.Type) }
func (f *Func) copy() Type {
cpy := *f
return &cpy
}
// FuncProto is a function declaration.
type FuncProto struct {
TypeID
Return Type
Params []FuncParam
}
func (fp *FuncProto) String() string {
var s strings.Builder
fmt.Fprintf(&s, "proto#%d[", fp.TypeID)
for _, param := range fp.Params {
fmt.Fprintf(&s, "%q=#%d, ", param.Name, param.Type.ID())
}
fmt.Fprintf(&s, "return=#%d]", fp.Return.ID())
return s.String()
}
func (fp *FuncProto) walk(tdq *typeDeque) {
tdq.push(&fp.Return)
for i := range fp.Params {
tdq.push(&fp.Params[i].Type)
}
}
func (fp *FuncProto) copy() Type {
cpy := *fp
cpy.Params = make([]FuncParam, len(fp.Params))
copy(cpy.Params, fp.Params)
return &cpy
}
type FuncParam struct {
Name string
Type Type
}
// Var is a global variable.
type Var struct {
TypeID
Name string
Type Type
Linkage VarLinkage
}
func (v *Var) String() string {
return fmt.Sprintf("var#%d[%s %q]", v.TypeID, v.Linkage, v.Name)
}
func (v *Var) TypeName() string { return v.Name }
func (v *Var) walk(tdq *typeDeque) { tdq.push(&v.Type) }
func (v *Var) copy() Type {
cpy := *v
return &cpy
}
// Datasec is a global program section containing data.
type Datasec struct {
TypeID
Name string
Size uint32
Vars []VarSecinfo
}
func (ds *Datasec) String() string {
return fmt.Sprintf("section#%d[%q]", ds.TypeID, ds.Name)
}
func (ds *Datasec) TypeName() string { return ds.Name }
func (ds *Datasec) size() uint32 { return ds.Size }
func (ds *Datasec) walk(tdq *typeDeque) {
for i := range ds.Vars {
tdq.push(&ds.Vars[i].Type)
}
}
func (ds *Datasec) copy() Type {
cpy := *ds
cpy.Vars = make([]VarSecinfo, len(ds.Vars))
copy(cpy.Vars, ds.Vars)
return &cpy
}
// VarSecinfo describes a variable in a Datasec.
//
// It is not a valid Type.
type VarSecinfo struct {
Type Type
Offset uint32
Size uint32
}
// Float is a float of a given length.
type Float struct {
TypeID
Name string
// The size of the float in bytes.
Size uint32
}
func (f *Float) String() string {
return fmt.Sprintf("float%d#%d[%q]", f.Size*8, f.TypeID, f.Name)
}
func (f *Float) TypeName() string { return f.Name }
func (f *Float) size() uint32 { return f.Size }
func (f *Float) walk(*typeDeque) {}
func (f *Float) copy() Type {
cpy := *f
return &cpy
}
type sizer interface {
size() uint32
}
var (
_ sizer = (*Int)(nil)
_ sizer = (*Pointer)(nil)
_ sizer = (*Struct)(nil)
_ sizer = (*Union)(nil)
_ sizer = (*Enum)(nil)
_ sizer = (*Datasec)(nil)
)
type qualifier interface {
qualify() Type
}
var (
_ qualifier = (*Const)(nil)
_ qualifier = (*Restrict)(nil)
_ qualifier = (*Volatile)(nil)
)
// Sizeof returns the size of a type in bytes.
//
// Returns an error if the size can't be computed.
func Sizeof(typ Type) (int, error) {
var (
n = int64(1)
elem int64
)
for i := 0; i < maxTypeDepth; i++ {
switch v := typ.(type) {
case *Array:
if n > 0 && int64(v.Nelems) > math.MaxInt64/n {
return 0, fmt.Errorf("type %s: overflow", typ)
}
// Arrays may be of zero length, which allows
// n to be zero as well.
n *= int64(v.Nelems)
typ = v.Type
continue
case sizer:
elem = int64(v.size())
case *Typedef:
typ = v.Type
continue
case qualifier:
typ = v.qualify()
continue
default:
return 0, fmt.Errorf("unsized type %T", typ)
}
if n > 0 && elem > math.MaxInt64/n {
return 0, fmt.Errorf("type %s: overflow", typ)
}
size := n * elem
if int64(int(size)) != size {
return 0, fmt.Errorf("type %s: overflow", typ)
}
return int(size), nil
}
return 0, fmt.Errorf("type %s: exceeded type depth", typ)
}
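// exampleSizeof is an illustrative sketch (hypothetical helper, not part of
// the package API). Sizeof multiplies array lengths and strips typedefs and
// qualifiers until it reaches a sized type; the types built here are made up.
func exampleSizeof() {
    u32 := &Int{Name: "u32", Size: 4}
    arr := &Array{Type: &Const{Type: &Typedef{Name: "elem", Type: u32}}, Nelems: 16}
    n, err := Sizeof(arr)
    _ = err == nil && n == 64 // 16 elements * 4 bytes
}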
// copy a Type recursively.
//
// typ may form a cycle.
//
// Returns any errors from transform verbatim.
func copyType(typ Type, transform func(Type) (Type, error)) (Type, error) {
copies := make(copier)
return typ, copies.copy(&typ, transform)
}
// copy a slice of Types recursively.
//
// Types may form a cycle.
//
// Returns any errors from transform verbatim.
func copyTypes(types []Type, transform func(Type) (Type, error)) ([]Type, error) {
result := make([]Type, len(types))
copy(result, types)
copies := make(copier)
for i := range result {
if err := copies.copy(&result[i], transform); err != nil {
return nil, err
}
}
return result, nil
}
type copier map[Type]Type
func (c copier) copy(typ *Type, transform func(Type) (Type, error)) error {
var work typeDeque
for t := typ; t != nil; t = work.pop() {
// *t is the identity of the type.
if cpy := c[*t]; cpy != nil {
*t = cpy
continue
}
var cpy Type
if transform != nil {
tf, err := transform(*t)
if err != nil {
return fmt.Errorf("copy %s: %w", *t, err)
}
cpy = tf.copy()
} else {
cpy = (*t).copy()
}
c[*t] = cpy
*t = cpy
// Mark any nested types for copying.
cpy.walk(&work)
}
return nil
}
// typeDeque keeps track of pointers to types which still
// need to be visited.
type typeDeque struct {
types []*Type
read, write uint64
mask uint64
}
func (dq *typeDeque) empty() bool {
return dq.read == dq.write
}
// push adds a type to the stack.
func (dq *typeDeque) push(t *Type) {
if dq.write-dq.read < uint64(len(dq.types)) {
dq.types[dq.write&dq.mask] = t
dq.write++
return
}
new := len(dq.types) * 2
if new == 0 {
new = 8
}
types := make([]*Type, new)
pivot := dq.read & dq.mask
n := copy(types, dq.types[pivot:])
n += copy(types[n:], dq.types[:pivot])
types[n] = t
dq.types = types
dq.mask = uint64(new) - 1
dq.read, dq.write = 0, uint64(n+1)
}
// shift returns the first element or nil.
func (dq *typeDeque) shift() *Type {
if dq.empty() {
return nil
}
index := dq.read & dq.mask
t := dq.types[index]
dq.types[index] = nil
dq.read++
return t
}
// pop returns the last element or nil.
func (dq *typeDeque) pop() *Type {
if dq.empty() {
return nil
}
dq.write--
index := dq.write & dq.mask
t := dq.types[index]
dq.types[index] = nil
return t
}
// all returns all elements.
//
// The deque is empty after calling this method.
func (dq *typeDeque) all() []*Type {
length := dq.write - dq.read
types := make([]*Type, 0, length)
for t := dq.shift(); t != nil; t = dq.shift() {
types = append(types, t)
}
return types
}
// inflateRawTypes takes a list of raw btf types linked via type IDs, and turns
// it into a graph of Types connected via pointers.
//
// Returns a map of named types (that is, types whose NameOff is non-zero) and a slice of types
// indexed by TypeID. Since BTF ignores compilation units, multiple types may share
// the same name. A Type may form a cyclic graph by pointing at itself.
func inflateRawTypes(rawTypes []rawType, rawStrings stringTable) (types []Type, namedTypes map[string][]NamedType, err error) {
type fixupDef struct {
id TypeID
expectedKind btfKind
typ *Type
}
var fixups []fixupDef
fixup := func(id TypeID, expectedKind btfKind, typ *Type) {
fixups = append(fixups, fixupDef{id, expectedKind, typ})
}
convertMembers := func(raw []btfMember, kindFlag bool) ([]Member, error) {
// NB: The fixup below relies on pre-allocating this array to
// work, since otherwise append might re-allocate members.
members := make([]Member, 0, len(raw))
for i, btfMember := range raw {
name, err := rawStrings.Lookup(btfMember.NameOff)
if err != nil {
return nil, fmt.Errorf("can't get name for member %d: %w", i, err)
}
m := Member{
Name: name,
OffsetBits: btfMember.Offset,
}
if kindFlag {
m.BitfieldSize = btfMember.Offset >> 24
m.OffsetBits &= 0xffffff
}
members = append(members, m)
}
for i := range members {
fixup(raw[i].Type, kindUnknown, &members[i].Type)
}
return members, nil
}
types = make([]Type, 0, len(rawTypes))
types = append(types, (*Void)(nil))
namedTypes = make(map[string][]NamedType)
for i, raw := range rawTypes {
var (
// Void is defined to always be type ID 0, and is thus
// omitted from BTF.
id = TypeID(i + 1)
typ Type
)
name, err := rawStrings.Lookup(raw.NameOff)
if err != nil {
return nil, nil, fmt.Errorf("get name for type id %d: %w", id, err)
}
switch raw.Kind() {
case kindInt:
encoding, offset, bits := intEncoding(*raw.data.(*uint32))
typ = &Int{id, name, raw.Size(), encoding, offset, bits}
case kindPointer:
ptr := &Pointer{id, nil}
fixup(raw.Type(), kindUnknown, &ptr.Target)
typ = ptr
case kindArray:
btfArr := raw.data.(*btfArray)
// IndexType is unused according to btf.rst.
// Don't make it available right now.
arr := &Array{id, nil, btfArr.Nelems}
fixup(btfArr.Type, kindUnknown, &arr.Type)
typ = arr
case kindStruct:
members, err := convertMembers(raw.data.([]btfMember), raw.KindFlag())
if err != nil {
return nil, nil, fmt.Errorf("struct %s (id %d): %w", name, id, err)
}
typ = &Struct{id, name, raw.Size(), members}
case kindUnion:
members, err := convertMembers(raw.data.([]btfMember), raw.KindFlag())
if err != nil {
return nil, nil, fmt.Errorf("union %s (id %d): %w", name, id, err)
}
typ = &Union{id, name, raw.Size(), members}
case kindEnum:
rawvals := raw.data.([]btfEnum)
vals := make([]EnumValue, 0, len(rawvals))
for i, btfVal := range rawvals {
name, err := rawStrings.Lookup(btfVal.NameOff)
if err != nil {
return nil, nil, fmt.Errorf("get name for enum value %d: %s", i, err)
}
vals = append(vals, EnumValue{
Name: name,
Value: btfVal.Val,
})
}
typ = &Enum{id, name, vals}
case kindForward:
if raw.KindFlag() {
typ = &Fwd{id, name, FwdUnion}
} else {
typ = &Fwd{id, name, FwdStruct}
}
case kindTypedef:
typedef := &Typedef{id, name, nil}
fixup(raw.Type(), kindUnknown, &typedef.Type)
typ = typedef
case kindVolatile:
volatile := &Volatile{id, nil}
fixup(raw.Type(), kindUnknown, &volatile.Type)
typ = volatile
case kindConst:
cnst := &Const{id, nil}
fixup(raw.Type(), kindUnknown, &cnst.Type)
typ = cnst
case kindRestrict:
restrict := &Restrict{id, nil}
fixup(raw.Type(), kindUnknown, &restrict.Type)
typ = restrict
case kindFunc:
fn := &Func{id, name, nil, raw.Linkage()}
fixup(raw.Type(), kindFuncProto, &fn.Type)
typ = fn
case kindFuncProto:
rawparams := raw.data.([]btfParam)
params := make([]FuncParam, 0, len(rawparams))
for i, param := range rawparams {
name, err := rawStrings.Lookup(param.NameOff)
if err != nil {
return nil, nil, fmt.Errorf("get name for func proto parameter %d: %s", i, err)
}
params = append(params, FuncParam{
Name: name,
})
}
for i := range params {
fixup(rawparams[i].Type, kindUnknown, &params[i].Type)
}
fp := &FuncProto{id, nil, params}
fixup(raw.Type(), kindUnknown, &fp.Return)
typ = fp
case kindVar:
variable := raw.data.(*btfVariable)
v := &Var{id, name, nil, VarLinkage(variable.Linkage)}
fixup(raw.Type(), kindUnknown, &v.Type)
typ = v
case kindDatasec:
btfVars := raw.data.([]btfVarSecinfo)
vars := make([]VarSecinfo, 0, len(btfVars))
for _, btfVar := range btfVars {
vars = append(vars, VarSecinfo{
Offset: btfVar.Offset,
Size: btfVar.Size,
})
}
for i := range vars {
fixup(btfVars[i].Type, kindVar, &vars[i].Type)
}
typ = &Datasec{id, name, raw.SizeType, vars}
case kindFloat:
typ = &Float{id, name, raw.Size()}
default:
return nil, nil, fmt.Errorf("type id %d: unknown kind: %v", id, raw.Kind())
}
types = append(types, typ)
if named, ok := typ.(NamedType); ok {
if name := essentialName(named.TypeName()); name != "" {
namedTypes[name] = append(namedTypes[name], named)
}
}
}
for _, fixup := range fixups {
i := int(fixup.id)
if i >= len(types) {
return nil, nil, fmt.Errorf("reference to invalid type id: %d", fixup.id)
}
// Default void (id 0) to unknown
rawKind := kindUnknown
if i > 0 {
rawKind = rawTypes[i-1].Kind()
}
if expected := fixup.expectedKind; expected != kindUnknown && rawKind != expected {
return nil, nil, fmt.Errorf("expected type id %d to have kind %s, found %s", fixup.id, expected, rawKind)
}
*fixup.typ = types[i]
}
return types, namedTypes, nil
}
// essentialName returns name without a ___ suffix.
func essentialName(name string) string {
lastIdx := strings.LastIndex(name, "___")
if lastIdx > 0 {
return name[:lastIdx]
}
return name
}
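For illustration only (not part of the vendored file), the ___ suffix marks alternative "flavors" of a type, and essentialName collapses them onto one lookup key:

// Hypothetical inputs and outputs:
//   essentialName("task_struct___v2") == "task_struct"
//   essentialName("task_struct")      == "task_struct"
//   essentialName("___x")             == "___x" // lastIdx is 0, so the name is kept as-is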

62
vendor/github.com/cilium/ebpf/internal/cpu.go generated vendored Normal file

@ -0,0 +1,62 @@
package internal
import (
"fmt"
"os"
"strings"
"sync"
)
var sysCPU struct {
once sync.Once
err error
num int
}
// PossibleCPUs returns the max number of CPUs a system may possibly have.
// Logical CPU numbers must be of the form 0-n.
func PossibleCPUs() (int, error) {
sysCPU.once.Do(func() {
sysCPU.num, sysCPU.err = parseCPUsFromFile("/sys/devices/system/cpu/possible")
})
return sysCPU.num, sysCPU.err
}
func parseCPUsFromFile(path string) (int, error) {
spec, err := os.ReadFile(path)
if err != nil {
return 0, err
}
n, err := parseCPUs(string(spec))
if err != nil {
return 0, fmt.Errorf("can't parse %s: %v", path, err)
}
return n, nil
}
// parseCPUs parses the number of cpus from a string produced
// by bitmap_list_string() in the Linux kernel.
// Multiple ranges are rejected, since they can't be unified
// into a single number.
// This is the format of /sys/devices/system/cpu/possible; it is not
// suitable for /sys/devices/system/cpu/online, etc.
func parseCPUs(spec string) (int, error) {
if strings.Trim(spec, "\n") == "0" {
return 1, nil
}
var low, high int
n, err := fmt.Sscanf(spec, "%d-%d\n", &low, &high)
if n != 2 || err != nil {
return 0, fmt.Errorf("invalid format: %s", spec)
}
if low != 0 {
return 0, fmt.Errorf("CPU spec doesn't start at zero: %s", spec)
}
// cpus is 0 indexed
return high + 1, nil
}
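For illustration only (not part of the vendored file), the accepted specs look like this:

// Hypothetical inputs and outputs for parseCPUs:
//   parseCPUs("0\n")   -> 1, nil   // a single possible CPU
//   parseCPUs("0-7\n") -> 8, nil   // CPUs 0 through 7
//   parseCPUs("1-7\n") -> error    // the spec must start at zero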

68
vendor/github.com/cilium/ebpf/internal/elf.go generated vendored Normal file

@ -0,0 +1,68 @@
package internal
import (
"debug/elf"
"fmt"
"io"
)
type SafeELFFile struct {
*elf.File
}
// NewSafeELFFile reads an ELF safely.
//
// Any panic during parsing is turned into an error. This is necessary since
// there are a bunch of unfixed bugs in debug/elf.
//
// https://github.com/golang/go/issues?q=is%3Aissue+is%3Aopen+debug%2Felf+in%3Atitle
func NewSafeELFFile(r io.ReaderAt) (safe *SafeELFFile, err error) {
defer func() {
r := recover()
if r == nil {
return
}
safe = nil
err = fmt.Errorf("reading ELF file panicked: %s", r)
}()
file, err := elf.NewFile(r)
if err != nil {
return nil, err
}
return &SafeELFFile{file}, nil
}
// Symbols is the safe version of elf.File.Symbols.
func (se *SafeELFFile) Symbols() (syms []elf.Symbol, err error) {
defer func() {
r := recover()
if r == nil {
return
}
syms = nil
err = fmt.Errorf("reading ELF symbols panicked: %s", r)
}()
syms, err = se.File.Symbols()
return
}
// DynamicSymbols is the safe version of elf.File.DynamicSymbols.
func (se *SafeELFFile) DynamicSymbols() (syms []elf.Symbol, err error) {
defer func() {
r := recover()
if r == nil {
return
}
syms = nil
err = fmt.Errorf("reading ELF dynamic symbols panicked: %s", r)
}()
syms, err = se.File.DynamicSymbols()
return
}

29
vendor/github.com/cilium/ebpf/internal/endian.go generated vendored Normal file

@ -0,0 +1,29 @@
package internal
import (
"encoding/binary"
"unsafe"
)
// NativeEndian is set to either binary.BigEndian or binary.LittleEndian,
// depending on the host's endianness.
var NativeEndian binary.ByteOrder
// Clang is set to either "el" or "eb" depending on the host's endianness.
var ClangEndian string
func init() {
if isBigEndian() {
NativeEndian = binary.BigEndian
ClangEndian = "eb"
} else {
NativeEndian = binary.LittleEndian
ClangEndian = "el"
}
}
func isBigEndian() (ret bool) {
i := int(0x1)
bs := (*[int(unsafe.Sizeof(i))]byte)(unsafe.Pointer(&i))
return bs[0] == 0
}
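A brief, hypothetical sketch (not part of the vendored file) of how NativeEndian is used to decode fixed-size values in host byte order:

// Hypothetical in-package sketch:
//   buf := []byte{0x2a, 0, 0, 0}
//   NativeEndian.Uint32(buf) // 42 on little-endian hosts, 0x2a000000 on big-endian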

51
vendor/github.com/cilium/ebpf/internal/errors.go generated vendored Normal file

@ -0,0 +1,51 @@
package internal
import (
"bytes"
"errors"
"fmt"
"strings"
"github.com/cilium/ebpf/internal/unix"
)
// ErrorWithLog returns an error that includes logs from the
// kernel verifier.
//
// logErr should be the error returned by the syscall that generated
// the log. It is used to check for truncation of the output.
func ErrorWithLog(err error, log []byte, logErr error) error {
logStr := strings.Trim(CString(log), "\t\r\n ")
if errors.Is(logErr, unix.ENOSPC) {
logStr += " (truncated...)"
}
return &VerifierError{err, logStr}
}
// VerifierError includes information from the eBPF verifier.
type VerifierError struct {
cause error
log string
}
func (le *VerifierError) Unwrap() error {
return le.cause
}
func (le *VerifierError) Error() string {
if le.log == "" {
return le.cause.Error()
}
return fmt.Sprintf("%s: %s", le.cause, le.log)
}
// CString turns a NUL / zero terminated byte buffer into a string.
func CString(in []byte) string {
inLen := bytes.IndexByte(in, 0)
if inLen == -1 {
return ""
}
return string(in[:inLen])
}
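For illustration only (not part of the vendored file):

// Hypothetical inputs and outputs for CString:
//   CString([]byte("invalid argument\x00junk")) == "invalid argument"
//   CString([]byte("no terminator"))            == ""  // no NUL byte found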

69
vendor/github.com/cilium/ebpf/internal/fd.go generated vendored Normal file

@ -0,0 +1,69 @@
package internal
import (
"errors"
"fmt"
"os"
"runtime"
"strconv"
"github.com/cilium/ebpf/internal/unix"
)
var ErrClosedFd = errors.New("use of closed file descriptor")
type FD struct {
raw int64
}
func NewFD(value uint32) *FD {
fd := &FD{int64(value)}
runtime.SetFinalizer(fd, (*FD).Close)
return fd
}
func (fd *FD) String() string {
return strconv.FormatInt(fd.raw, 10)
}
func (fd *FD) Value() (uint32, error) {
if fd.raw < 0 {
return 0, ErrClosedFd
}
return uint32(fd.raw), nil
}
func (fd *FD) Close() error {
if fd.raw < 0 {
return nil
}
value := int(fd.raw)
fd.raw = -1
fd.Forget()
return unix.Close(value)
}
func (fd *FD) Forget() {
runtime.SetFinalizer(fd, nil)
}
func (fd *FD) Dup() (*FD, error) {
if fd.raw < 0 {
return nil, ErrClosedFd
}
dup, err := unix.FcntlInt(uintptr(fd.raw), unix.F_DUPFD_CLOEXEC, 0)
if err != nil {
return nil, fmt.Errorf("can't dup fd: %v", err)
}
return NewFD(uint32(dup)), nil
}
func (fd *FD) File(name string) *os.File {
fd.Forget()
return os.NewFile(uintptr(fd.raw), name)
}

100
vendor/github.com/cilium/ebpf/internal/feature.go generated vendored Normal file

@ -0,0 +1,100 @@
package internal
import (
"errors"
"fmt"
"sync"
)
// ErrNotSupported indicates that a feature is not supported by the current kernel.
var ErrNotSupported = errors.New("not supported")
// UnsupportedFeatureError is returned by FeatureTest() functions.
type UnsupportedFeatureError struct {
// The minimum Linux mainline version required for this feature.
// Used for the error string, and for sanity checking during testing.
MinimumVersion Version
// The name of the feature that isn't supported.
Name string
}
func (ufe *UnsupportedFeatureError) Error() string {
if ufe.MinimumVersion.Unspecified() {
return fmt.Sprintf("%s not supported", ufe.Name)
}
return fmt.Sprintf("%s not supported (requires >= %s)", ufe.Name, ufe.MinimumVersion)
}
// Is indicates that UnsupportedFeatureError is ErrNotSupported.
func (ufe *UnsupportedFeatureError) Is(target error) bool {
return target == ErrNotSupported
}
type featureTest struct {
sync.RWMutex
successful bool
result error
}
// FeatureTestFn is used to determine whether the kernel supports
// a certain feature.
//
// The return values have the following semantics:
//
// err == ErrNotSupported: the feature is not available
// err == nil: the feature is available
// err != nil: the test couldn't be executed
type FeatureTestFn func() error
// FeatureTest wraps a function so that it is run at most once.
//
// name should identify the tested feature, while version must be in the
// form Major.Minor[.Patch].
//
// Returns an error wrapping ErrNotSupported if the feature is not supported.
func FeatureTest(name, version string, fn FeatureTestFn) func() error {
v, err := NewVersion(version)
if err != nil {
return func() error { return err }
}
ft := new(featureTest)
return func() error {
ft.RLock()
if ft.successful {
defer ft.RUnlock()
return ft.result
}
ft.RUnlock()
ft.Lock()
defer ft.Unlock()
// Check again in case two goroutines raced for the write lock.
if ft.successful {
return ft.result
}
err := fn()
switch {
case errors.Is(err, ErrNotSupported):
ft.result = &UnsupportedFeatureError{
MinimumVersion: v,
Name: name,
}
fallthrough
case err == nil:
ft.successful = true
default:
// We couldn't execute the feature test to a point
// where it could make a determination.
// Don't cache the result, just return it.
return fmt.Errorf("detect support for %s: %w", name, err)
}
return ft.result
}
}
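A hedged sketch, not part of the vendored file, of how a feature probe might be declared on top of FeatureTest. The probe body is a placeholder; real probes attempt the relevant bpf(2) operation and map its outcome onto nil or ErrNotSupported:

// Hypothetical probe, for illustration only.
var haveExampleFeature = FeatureTest("example feature", "5.4", func() error {
    // Attempt the operation here. Returning ErrNotSupported caches an
    // *UnsupportedFeatureError; returning nil caches success.
    return ErrNotSupported
})

// Callers then check it like any other error:
//   if err := haveExampleFeature(); err != nil { /* fall back */ }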

16
vendor/github.com/cilium/ebpf/internal/io.go generated vendored Normal file

@ -0,0 +1,16 @@
package internal
import "errors"
// DiscardZeroes makes sure that all written bytes are zero
// before discarding them.
type DiscardZeroes struct{}
func (DiscardZeroes) Write(p []byte) (int, error) {
for _, b := range p {
if b != 0 {
return 0, errors.New("encountered non-zero byte")
}
}
return len(p), nil
}
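For illustration only (not part of the vendored file), DiscardZeroes satisfies io.Writer, so it can back io.Copy when the only goal is to assert that a stream is all zeroes (assumes an io import and some io.Reader r):

// Hypothetical sketch:
//   if _, err := io.Copy(DiscardZeroes{}, r); err != nil {
//       // the stream contained a non-zero byte
//   }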

44
vendor/github.com/cilium/ebpf/internal/pinning.go generated vendored Normal file

@ -0,0 +1,44 @@
package internal
import (
"errors"
"fmt"
"os"
"github.com/cilium/ebpf/internal/unix"
)
func Pin(currentPath, newPath string, fd *FD) error {
if newPath == "" {
return errors.New("given pinning path cannot be empty")
}
if currentPath == newPath {
return nil
}
if currentPath == "" {
return BPFObjPin(newPath, fd)
}
var err error
// Renameat2 is used instead of os.Rename to disallow the new path replacing
// an existing path.
if err = unix.Renameat2(unix.AT_FDCWD, currentPath, unix.AT_FDCWD, newPath, unix.RENAME_NOREPLACE); err == nil {
// Object is now moved to the new pinning path.
return nil
}
if !os.IsNotExist(err) {
return fmt.Errorf("unable to move pinned object to new path %v: %w", newPath, err)
}
// Internal state not in sync with the file system so let's fix it.
return BPFObjPin(newPath, fd)
}
func Unpin(pinnedPath string) error {
if pinnedPath == "" {
return nil
}
err := os.Remove(pinnedPath)
if err == nil || os.IsNotExist(err) {
return nil
}
return err
}

31
vendor/github.com/cilium/ebpf/internal/ptr.go generated vendored Normal file

@ -0,0 +1,31 @@
package internal
import (
"unsafe"
"github.com/cilium/ebpf/internal/unix"
)
// NewPointer creates a 64-bit pointer from an unsafe Pointer.
func NewPointer(ptr unsafe.Pointer) Pointer {
return Pointer{ptr: ptr}
}
// NewSlicePointer creates a 64-bit pointer from a byte slice.
func NewSlicePointer(buf []byte) Pointer {
if len(buf) == 0 {
return Pointer{}
}
return Pointer{ptr: unsafe.Pointer(&buf[0])}
}
// NewStringPointer creates a 64-bit pointer from a string.
func NewStringPointer(str string) Pointer {
p, err := unix.BytePtrFromString(str)
if err != nil {
return Pointer{}
}
return Pointer{ptr: unsafe.Pointer(p)}
}

15
vendor/github.com/cilium/ebpf/internal/ptr_32_be.go generated vendored Normal file

@ -0,0 +1,15 @@
//go:build armbe || mips || mips64p32
// +build armbe mips mips64p32
package internal
import (
"unsafe"
)
// Pointer wraps an unsafe.Pointer to be 64bit to
// conform to the syscall specification.
type Pointer struct {
pad uint32
ptr unsafe.Pointer
}

15
vendor/github.com/cilium/ebpf/internal/ptr_32_le.go generated vendored Normal file

@ -0,0 +1,15 @@
//go:build 386 || amd64p32 || arm || mipsle || mips64p32le
// +build 386 amd64p32 arm mipsle mips64p32le
package internal
import (
"unsafe"
)
// Pointer wraps an unsafe.Pointer to be 64bit to
// conform to the syscall specification.
type Pointer struct {
ptr unsafe.Pointer
pad uint32
}

14
vendor/github.com/cilium/ebpf/internal/ptr_64.go generated vendored Normal file

@ -0,0 +1,14 @@
//go:build !386 && !amd64p32 && !arm && !mipsle && !mips64p32le && !armbe && !mips && !mips64p32
// +build !386,!amd64p32,!arm,!mipsle,!mips64p32le,!armbe,!mips,!mips64p32
package internal
import (
"unsafe"
)
// Pointer wraps an unsafe.Pointer to be 64bit to
// conform to the syscall specification.
type Pointer struct {
ptr unsafe.Pointer
}

304
vendor/github.com/cilium/ebpf/internal/syscall.go generated vendored Normal file

@ -0,0 +1,304 @@
package internal
import (
"errors"
"fmt"
"path/filepath"
"runtime"
"syscall"
"unsafe"
"github.com/cilium/ebpf/internal/unix"
)
//go:generate stringer -output syscall_string.go -type=BPFCmd
// BPFCmd identifies a subcommand of the bpf syscall.
type BPFCmd int
// Well known BPF commands.
const (
BPF_MAP_CREATE BPFCmd = iota
BPF_MAP_LOOKUP_ELEM
BPF_MAP_UPDATE_ELEM
BPF_MAP_DELETE_ELEM
BPF_MAP_GET_NEXT_KEY
BPF_PROG_LOAD
BPF_OBJ_PIN
BPF_OBJ_GET
BPF_PROG_ATTACH
BPF_PROG_DETACH
BPF_PROG_TEST_RUN
BPF_PROG_GET_NEXT_ID
BPF_MAP_GET_NEXT_ID
BPF_PROG_GET_FD_BY_ID
BPF_MAP_GET_FD_BY_ID
BPF_OBJ_GET_INFO_BY_FD
BPF_PROG_QUERY
BPF_RAW_TRACEPOINT_OPEN
BPF_BTF_LOAD
BPF_BTF_GET_FD_BY_ID
BPF_TASK_FD_QUERY
BPF_MAP_LOOKUP_AND_DELETE_ELEM
BPF_MAP_FREEZE
BPF_BTF_GET_NEXT_ID
BPF_MAP_LOOKUP_BATCH
BPF_MAP_LOOKUP_AND_DELETE_BATCH
BPF_MAP_UPDATE_BATCH
BPF_MAP_DELETE_BATCH
BPF_LINK_CREATE
BPF_LINK_UPDATE
BPF_LINK_GET_FD_BY_ID
BPF_LINK_GET_NEXT_ID
BPF_ENABLE_STATS
BPF_ITER_CREATE
)
// BPF wraps SYS_BPF.
//
// Any pointers contained in attr must use the Pointer type from this package.
func BPF(cmd BPFCmd, attr unsafe.Pointer, size uintptr) (uintptr, error) {
r1, _, errNo := unix.Syscall(unix.SYS_BPF, uintptr(cmd), uintptr(attr), size)
runtime.KeepAlive(attr)
var err error
if errNo != 0 {
err = wrappedErrno{errNo}
}
return r1, err
}
type BPFProgLoadAttr struct {
ProgType uint32
InsCount uint32
Instructions Pointer
License Pointer
LogLevel uint32
LogSize uint32
LogBuf Pointer
KernelVersion uint32 // since 4.1 2541517c32be
ProgFlags uint32 // since 4.11 e07b98d9bffe
ProgName BPFObjName // since 4.15 067cae47771c
ProgIfIndex uint32 // since 4.15 1f6f4cb7ba21
ExpectedAttachType uint32 // since 4.17 5e43f899b03a
ProgBTFFd uint32
FuncInfoRecSize uint32
FuncInfo Pointer
FuncInfoCnt uint32
LineInfoRecSize uint32
LineInfo Pointer
LineInfoCnt uint32
AttachBTFID uint32
AttachProgFd uint32
}
// BPFProgLoad wraps BPF_PROG_LOAD.
func BPFProgLoad(attr *BPFProgLoadAttr) (*FD, error) {
for {
fd, err := BPF(BPF_PROG_LOAD, unsafe.Pointer(attr), unsafe.Sizeof(*attr))
// As of ~4.20 the verifier can be interrupted by a signal,
// and returns EAGAIN in that case.
if errors.Is(err, unix.EAGAIN) {
continue
}
if err != nil {
return nil, err
}
return NewFD(uint32(fd)), nil
}
}
type BPFProgAttachAttr struct {
TargetFd uint32
AttachBpfFd uint32
AttachType uint32
AttachFlags uint32
ReplaceBpfFd uint32
}
func BPFProgAttach(attr *BPFProgAttachAttr) error {
_, err := BPF(BPF_PROG_ATTACH, unsafe.Pointer(attr), unsafe.Sizeof(*attr))
return err
}
type BPFProgDetachAttr struct {
TargetFd uint32
AttachBpfFd uint32
AttachType uint32
}
func BPFProgDetach(attr *BPFProgDetachAttr) error {
_, err := BPF(BPF_PROG_DETACH, unsafe.Pointer(attr), unsafe.Sizeof(*attr))
return err
}
type BPFEnableStatsAttr struct {
StatsType uint32
}
func BPFEnableStats(attr *BPFEnableStatsAttr) (*FD, error) {
ptr, err := BPF(BPF_ENABLE_STATS, unsafe.Pointer(attr), unsafe.Sizeof(*attr))
if err != nil {
return nil, fmt.Errorf("enable stats: %w", err)
}
return NewFD(uint32(ptr)), nil
}
type bpfObjAttr struct {
fileName Pointer
fd uint32
fileFlags uint32
}
const bpfFSType = 0xcafe4a11
// BPFObjPin wraps BPF_OBJ_PIN.
func BPFObjPin(fileName string, fd *FD) error {
dirName := filepath.Dir(fileName)
var statfs unix.Statfs_t
if err := unix.Statfs(dirName, &statfs); err != nil {
return err
}
if uint64(statfs.Type) != bpfFSType {
return fmt.Errorf("%s is not on a bpf filesystem", fileName)
}
value, err := fd.Value()
if err != nil {
return err
}
attr := bpfObjAttr{
fileName: NewStringPointer(fileName),
fd: value,
}
_, err = BPF(BPF_OBJ_PIN, unsafe.Pointer(&attr), unsafe.Sizeof(attr))
if err != nil {
return fmt.Errorf("pin object %s: %w", fileName, err)
}
return nil
}
// BPFObjGet wraps BPF_OBJ_GET.
func BPFObjGet(fileName string, flags uint32) (*FD, error) {
attr := bpfObjAttr{
fileName: NewStringPointer(fileName),
fileFlags: flags,
}
ptr, err := BPF(BPF_OBJ_GET, unsafe.Pointer(&attr), unsafe.Sizeof(attr))
if err != nil {
return nil, fmt.Errorf("get object %s: %w", fileName, err)
}
return NewFD(uint32(ptr)), nil
}
type bpfObjGetInfoByFDAttr struct {
fd uint32
infoLen uint32
info Pointer
}
// BPFObjGetInfoByFD wraps BPF_OBJ_GET_INFO_BY_FD.
//
// Available from 4.13.
func BPFObjGetInfoByFD(fd *FD, info unsafe.Pointer, size uintptr) error {
value, err := fd.Value()
if err != nil {
return err
}
attr := bpfObjGetInfoByFDAttr{
fd: value,
infoLen: uint32(size),
info: NewPointer(info),
}
_, err = BPF(BPF_OBJ_GET_INFO_BY_FD, unsafe.Pointer(&attr), unsafe.Sizeof(attr))
if err != nil {
return fmt.Errorf("fd %v: %w", fd, err)
}
return nil
}
type bpfGetFDByIDAttr struct {
id uint32
next uint32
}
// BPFObjGetFDByID wraps BPF_*_GET_FD_BY_ID.
//
// Available from 4.13.
func BPFObjGetFDByID(cmd BPFCmd, id uint32) (*FD, error) {
attr := bpfGetFDByIDAttr{
id: id,
}
ptr, err := BPF(cmd, unsafe.Pointer(&attr), unsafe.Sizeof(attr))
return NewFD(uint32(ptr)), err
}
// BPFObjName is a null-terminated string made up of
// 'A-Za-z0-9_' characters.
type BPFObjName [unix.BPF_OBJ_NAME_LEN]byte
// NewBPFObjName truncates the result if it is too long.
func NewBPFObjName(name string) BPFObjName {
var result BPFObjName
copy(result[:unix.BPF_OBJ_NAME_LEN-1], name)
return result
}
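For illustration only (not part of the vendored file), with BPF_OBJ_NAME_LEN equal to 16:

// Hypothetical inputs and outputs:
//   NewBPFObjName("short")                   // "short" followed by zero padding
//   NewBPFObjName("a_very_long_object_name") // only the first 15 bytes are kept,
//                                            // the final byte stays NUL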
type BPFMapCreateAttr struct {
MapType uint32
KeySize uint32
ValueSize uint32
MaxEntries uint32
Flags uint32
InnerMapFd uint32 // since 4.12 56f668dfe00d
NumaNode uint32 // since 4.14 96eabe7a40aa
MapName BPFObjName // since 4.15 ad5b177bd73f
MapIfIndex uint32
BTFFd uint32
BTFKeyTypeID uint32
BTFValueTypeID uint32
}
func BPFMapCreate(attr *BPFMapCreateAttr) (*FD, error) {
fd, err := BPF(BPF_MAP_CREATE, unsafe.Pointer(attr), unsafe.Sizeof(*attr))
if err != nil {
return nil, err
}
return NewFD(uint32(fd)), nil
}
// wrappedErrno wraps syscall.Errno to prevent direct comparisons with
// syscall.E* or unix.E* constants.
//
// You should never export an error of this type.
type wrappedErrno struct {
syscall.Errno
}
func (we wrappedErrno) Unwrap() error {
return we.Errno
}
type syscallError struct {
error
errno syscall.Errno
}
func SyscallError(err error, errno syscall.Errno) error {
return &syscallError{err, errno}
}
func (se *syscallError) Is(target error) bool {
return target == se.error
}
func (se *syscallError) Unwrap() error {
return se.errno
}

56
vendor/github.com/cilium/ebpf/internal/syscall_string.go generated vendored Normal file

@ -0,0 +1,56 @@
// Code generated by "stringer -output syscall_string.go -type=BPFCmd"; DO NOT EDIT.
package internal
import "strconv"
func _() {
// An "invalid array index" compiler error signifies that the constant values have changed.
// Re-run the stringer command to generate them again.
var x [1]struct{}
_ = x[BPF_MAP_CREATE-0]
_ = x[BPF_MAP_LOOKUP_ELEM-1]
_ = x[BPF_MAP_UPDATE_ELEM-2]
_ = x[BPF_MAP_DELETE_ELEM-3]
_ = x[BPF_MAP_GET_NEXT_KEY-4]
_ = x[BPF_PROG_LOAD-5]
_ = x[BPF_OBJ_PIN-6]
_ = x[BPF_OBJ_GET-7]
_ = x[BPF_PROG_ATTACH-8]
_ = x[BPF_PROG_DETACH-9]
_ = x[BPF_PROG_TEST_RUN-10]
_ = x[BPF_PROG_GET_NEXT_ID-11]
_ = x[BPF_MAP_GET_NEXT_ID-12]
_ = x[BPF_PROG_GET_FD_BY_ID-13]
_ = x[BPF_MAP_GET_FD_BY_ID-14]
_ = x[BPF_OBJ_GET_INFO_BY_FD-15]
_ = x[BPF_PROG_QUERY-16]
_ = x[BPF_RAW_TRACEPOINT_OPEN-17]
_ = x[BPF_BTF_LOAD-18]
_ = x[BPF_BTF_GET_FD_BY_ID-19]
_ = x[BPF_TASK_FD_QUERY-20]
_ = x[BPF_MAP_LOOKUP_AND_DELETE_ELEM-21]
_ = x[BPF_MAP_FREEZE-22]
_ = x[BPF_BTF_GET_NEXT_ID-23]
_ = x[BPF_MAP_LOOKUP_BATCH-24]
_ = x[BPF_MAP_LOOKUP_AND_DELETE_BATCH-25]
_ = x[BPF_MAP_UPDATE_BATCH-26]
_ = x[BPF_MAP_DELETE_BATCH-27]
_ = x[BPF_LINK_CREATE-28]
_ = x[BPF_LINK_UPDATE-29]
_ = x[BPF_LINK_GET_FD_BY_ID-30]
_ = x[BPF_LINK_GET_NEXT_ID-31]
_ = x[BPF_ENABLE_STATS-32]
_ = x[BPF_ITER_CREATE-33]
}
const _BPFCmd_name = "BPF_MAP_CREATEBPF_MAP_LOOKUP_ELEMBPF_MAP_UPDATE_ELEMBPF_MAP_DELETE_ELEMBPF_MAP_GET_NEXT_KEYBPF_PROG_LOADBPF_OBJ_PINBPF_OBJ_GETBPF_PROG_ATTACHBPF_PROG_DETACHBPF_PROG_TEST_RUNBPF_PROG_GET_NEXT_IDBPF_MAP_GET_NEXT_IDBPF_PROG_GET_FD_BY_IDBPF_MAP_GET_FD_BY_IDBPF_OBJ_GET_INFO_BY_FDBPF_PROG_QUERYBPF_RAW_TRACEPOINT_OPENBPF_BTF_LOADBPF_BTF_GET_FD_BY_IDBPF_TASK_FD_QUERYBPF_MAP_LOOKUP_AND_DELETE_ELEMBPF_MAP_FREEZEBPF_BTF_GET_NEXT_IDBPF_MAP_LOOKUP_BATCHBPF_MAP_LOOKUP_AND_DELETE_BATCHBPF_MAP_UPDATE_BATCHBPF_MAP_DELETE_BATCHBPF_LINK_CREATEBPF_LINK_UPDATEBPF_LINK_GET_FD_BY_IDBPF_LINK_GET_NEXT_IDBPF_ENABLE_STATSBPF_ITER_CREATE"
var _BPFCmd_index = [...]uint16{0, 14, 33, 52, 71, 91, 104, 115, 126, 141, 156, 173, 193, 212, 233, 253, 275, 289, 312, 324, 344, 361, 391, 405, 424, 444, 475, 495, 515, 530, 545, 566, 586, 602, 617}
func (i BPFCmd) String() string {
if i < 0 || i >= BPFCmd(len(_BPFCmd_index)-1) {
return "BPFCmd(" + strconv.FormatInt(int64(i), 10) + ")"
}
return _BPFCmd_name[_BPFCmd_index[i]:_BPFCmd_index[i+1]]
}

@ -0,0 +1,208 @@
//go:build linux
// +build linux
package unix
import (
"bytes"
"syscall"
linux "golang.org/x/sys/unix"
)
const (
ENOENT = linux.ENOENT
EEXIST = linux.EEXIST
EAGAIN = linux.EAGAIN
ENOSPC = linux.ENOSPC
EINVAL = linux.EINVAL
EPOLLIN = linux.EPOLLIN
EINTR = linux.EINTR
EPERM = linux.EPERM
ESRCH = linux.ESRCH
ENODEV = linux.ENODEV
EBADF = linux.EBADF
E2BIG = linux.E2BIG
// ENOTSUPP is not the same as ENOTSUP or EOPNOTSUPP
ENOTSUPP = syscall.Errno(0x20c)
BPF_F_NO_PREALLOC = linux.BPF_F_NO_PREALLOC
BPF_F_NUMA_NODE = linux.BPF_F_NUMA_NODE
BPF_F_RDONLY = linux.BPF_F_RDONLY
BPF_F_WRONLY = linux.BPF_F_WRONLY
BPF_F_RDONLY_PROG = linux.BPF_F_RDONLY_PROG
BPF_F_WRONLY_PROG = linux.BPF_F_WRONLY_PROG
BPF_F_SLEEPABLE = linux.BPF_F_SLEEPABLE
BPF_F_MMAPABLE = linux.BPF_F_MMAPABLE
BPF_F_INNER_MAP = linux.BPF_F_INNER_MAP
BPF_OBJ_NAME_LEN = linux.BPF_OBJ_NAME_LEN
BPF_TAG_SIZE = linux.BPF_TAG_SIZE
BPF_RINGBUF_BUSY_BIT = linux.BPF_RINGBUF_BUSY_BIT
BPF_RINGBUF_DISCARD_BIT = linux.BPF_RINGBUF_DISCARD_BIT
BPF_RINGBUF_HDR_SZ = linux.BPF_RINGBUF_HDR_SZ
SYS_BPF = linux.SYS_BPF
F_DUPFD_CLOEXEC = linux.F_DUPFD_CLOEXEC
EPOLL_CTL_ADD = linux.EPOLL_CTL_ADD
EPOLL_CLOEXEC = linux.EPOLL_CLOEXEC
O_CLOEXEC = linux.O_CLOEXEC
O_NONBLOCK = linux.O_NONBLOCK
PROT_READ = linux.PROT_READ
PROT_WRITE = linux.PROT_WRITE
MAP_SHARED = linux.MAP_SHARED
PERF_ATTR_SIZE_VER1 = linux.PERF_ATTR_SIZE_VER1
PERF_TYPE_SOFTWARE = linux.PERF_TYPE_SOFTWARE
PERF_TYPE_TRACEPOINT = linux.PERF_TYPE_TRACEPOINT
PERF_COUNT_SW_BPF_OUTPUT = linux.PERF_COUNT_SW_BPF_OUTPUT
PERF_EVENT_IOC_DISABLE = linux.PERF_EVENT_IOC_DISABLE
PERF_EVENT_IOC_ENABLE = linux.PERF_EVENT_IOC_ENABLE
PERF_EVENT_IOC_SET_BPF = linux.PERF_EVENT_IOC_SET_BPF
PerfBitWatermark = linux.PerfBitWatermark
PERF_SAMPLE_RAW = linux.PERF_SAMPLE_RAW
PERF_FLAG_FD_CLOEXEC = linux.PERF_FLAG_FD_CLOEXEC
RLIM_INFINITY = linux.RLIM_INFINITY
RLIMIT_MEMLOCK = linux.RLIMIT_MEMLOCK
BPF_STATS_RUN_TIME = linux.BPF_STATS_RUN_TIME
PERF_RECORD_LOST = linux.PERF_RECORD_LOST
PERF_RECORD_SAMPLE = linux.PERF_RECORD_SAMPLE
AT_FDCWD = linux.AT_FDCWD
RENAME_NOREPLACE = linux.RENAME_NOREPLACE
)
// Statfs_t is a wrapper
type Statfs_t = linux.Statfs_t
// Rlimit is a wrapper
type Rlimit = linux.Rlimit
// Syscall is a wrapper
func Syscall(trap, a1, a2, a3 uintptr) (r1, r2 uintptr, err syscall.Errno) {
return linux.Syscall(trap, a1, a2, a3)
}
// FcntlInt is a wrapper
func FcntlInt(fd uintptr, cmd, arg int) (int, error) {
return linux.FcntlInt(fd, cmd, arg)
}
// IoctlSetInt is a wrapper
func IoctlSetInt(fd int, req uint, value int) error {
return linux.IoctlSetInt(fd, req, value)
}
// Statfs is a wrapper
func Statfs(path string, buf *Statfs_t) (err error) {
return linux.Statfs(path, buf)
}
// Close is a wrapper
func Close(fd int) (err error) {
return linux.Close(fd)
}
// EpollEvent is a wrapper
type EpollEvent = linux.EpollEvent
// EpollWait is a wrapper
func EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) {
return linux.EpollWait(epfd, events, msec)
}
// EpollCtl is a wrapper
func EpollCtl(epfd int, op int, fd int, event *EpollEvent) (err error) {
return linux.EpollCtl(epfd, op, fd, event)
}
// Eventfd is a wrapper
func Eventfd(initval uint, flags int) (fd int, err error) {
return linux.Eventfd(initval, flags)
}
// Write is a wrapper
func Write(fd int, p []byte) (n int, err error) {
return linux.Write(fd, p)
}
// EpollCreate1 is a wrapper
func EpollCreate1(flag int) (fd int, err error) {
return linux.EpollCreate1(flag)
}
// PerfEventMmapPage is a wrapper
type PerfEventMmapPage linux.PerfEventMmapPage
// SetNonblock is a wrapper
func SetNonblock(fd int, nonblocking bool) (err error) {
return linux.SetNonblock(fd, nonblocking)
}
// Mmap is a wrapper
func Mmap(fd int, offset int64, length int, prot int, flags int) (data []byte, err error) {
return linux.Mmap(fd, offset, length, prot, flags)
}
// Munmap is a wrapper
func Munmap(b []byte) (err error) {
return linux.Munmap(b)
}
// PerfEventAttr is a wrapper
type PerfEventAttr = linux.PerfEventAttr
// PerfEventOpen is a wrapper
func PerfEventOpen(attr *PerfEventAttr, pid int, cpu int, groupFd int, flags int) (fd int, err error) {
return linux.PerfEventOpen(attr, pid, cpu, groupFd, flags)
}
// Utsname is a wrapper
type Utsname = linux.Utsname
// Uname is a wrapper
func Uname(buf *Utsname) (err error) {
return linux.Uname(buf)
}
// Getpid is a wrapper
func Getpid() int {
return linux.Getpid()
}
// Gettid is a wrapper
func Gettid() int {
return linux.Gettid()
}
// Tgkill is a wrapper
func Tgkill(tgid int, tid int, sig syscall.Signal) (err error) {
return linux.Tgkill(tgid, tid, sig)
}
// BytePtrFromString is a wrapper
func BytePtrFromString(s string) (*byte, error) {
return linux.BytePtrFromString(s)
}
// ByteSliceToString is a wrapper
func ByteSliceToString(s []byte) string {
return linux.ByteSliceToString(s)
}
// Renameat2 is a wrapper
func Renameat2(olddirfd int, oldpath string, newdirfd int, newpath string, flags uint) error {
return linux.Renameat2(olddirfd, oldpath, newdirfd, newpath, flags)
}
func KernelRelease() (string, error) {
var uname Utsname
err := Uname(&uname)
if err != nil {
return "", err
}
end := bytes.IndexByte(uname.Release[:], 0)
release := string(uname.Release[:end])
return release, nil
}
func Prlimit(pid, resource int, new, old *Rlimit) error {
return linux.Prlimit(pid, resource, new, old)
}

@ -0,0 +1,267 @@
//go:build !linux
// +build !linux
package unix
import (
"fmt"
"runtime"
"syscall"
)
var errNonLinux = fmt.Errorf("unsupported platform %s/%s", runtime.GOOS, runtime.GOARCH)
const (
ENOENT = syscall.ENOENT
EEXIST = syscall.EEXIST
EAGAIN = syscall.EAGAIN
ENOSPC = syscall.ENOSPC
EINVAL = syscall.EINVAL
EINTR = syscall.EINTR
EPERM = syscall.EPERM
ESRCH = syscall.ESRCH
ENODEV = syscall.ENODEV
EBADF = syscall.Errno(0)
E2BIG = syscall.Errno(0)
// ENOTSUPP is not the same as ENOTSUP or EOPNOTSUPP
ENOTSUPP = syscall.Errno(0x20c)
BPF_F_NO_PREALLOC = 0
BPF_F_NUMA_NODE = 0
BPF_F_RDONLY = 0
BPF_F_WRONLY = 0
BPF_F_RDONLY_PROG = 0
BPF_F_WRONLY_PROG = 0
BPF_F_SLEEPABLE = 0
BPF_F_MMAPABLE = 0
BPF_F_INNER_MAP = 0
BPF_OBJ_NAME_LEN = 0x10
BPF_TAG_SIZE = 0x8
BPF_RINGBUF_BUSY_BIT = 0
BPF_RINGBUF_DISCARD_BIT = 0
BPF_RINGBUF_HDR_SZ = 0
SYS_BPF = 321
F_DUPFD_CLOEXEC = 0x406
EPOLLIN = 0x1
EPOLL_CTL_ADD = 0x1
EPOLL_CLOEXEC = 0x80000
O_CLOEXEC = 0x80000
O_NONBLOCK = 0x800
PROT_READ = 0x1
PROT_WRITE = 0x2
MAP_SHARED = 0x1
PERF_ATTR_SIZE_VER1 = 0
PERF_TYPE_SOFTWARE = 0x1
PERF_TYPE_TRACEPOINT = 0
PERF_COUNT_SW_BPF_OUTPUT = 0xa
PERF_EVENT_IOC_DISABLE = 0
PERF_EVENT_IOC_ENABLE = 0
PERF_EVENT_IOC_SET_BPF = 0
PerfBitWatermark = 0x4000
PERF_SAMPLE_RAW = 0x400
PERF_FLAG_FD_CLOEXEC = 0x8
RLIM_INFINITY = 0x7fffffffffffffff
RLIMIT_MEMLOCK = 8
BPF_STATS_RUN_TIME = 0
PERF_RECORD_LOST = 2
PERF_RECORD_SAMPLE = 9
AT_FDCWD = -0x2
RENAME_NOREPLACE = 0x1
)
// Statfs_t is a wrapper
type Statfs_t struct {
Type int64
Bsize int64
Blocks uint64
Bfree uint64
Bavail uint64
Files uint64
Ffree uint64
Fsid [2]int32
Namelen int64
Frsize int64
Flags int64
Spare [4]int64
}
// Rlimit is a wrapper
type Rlimit struct {
Cur uint64
Max uint64
}
// Syscall is a wrapper
func Syscall(trap, a1, a2, a3 uintptr) (r1, r2 uintptr, err syscall.Errno) {
return 0, 0, syscall.Errno(1)
}
// FcntlInt is a wrapper
func FcntlInt(fd uintptr, cmd, arg int) (int, error) {
return -1, errNonLinux
}
// IoctlSetInt is a wrapper
func IoctlSetInt(fd int, req uint, value int) error {
return errNonLinux
}
// Statfs is a wrapper
func Statfs(path string, buf *Statfs_t) error {
return errNonLinux
}
// Close is a wrapper
func Close(fd int) (err error) {
return errNonLinux
}
// EpollEvent is a wrapper
type EpollEvent struct {
Events uint32
Fd int32
Pad int32
}
// EpollWait is a wrapper
func EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) {
return 0, errNonLinux
}
// EpollCtl is a wrapper
func EpollCtl(epfd int, op int, fd int, event *EpollEvent) (err error) {
return errNonLinux
}
// Eventfd is a wrapper
func Eventfd(initval uint, flags int) (fd int, err error) {
return 0, errNonLinux
}
// Write is a wrapper
func Write(fd int, p []byte) (n int, err error) {
return 0, errNonLinux
}
// EpollCreate1 is a wrapper
func EpollCreate1(flag int) (fd int, err error) {
return 0, errNonLinux
}
// PerfEventMmapPage is a wrapper
type PerfEventMmapPage struct {
Version uint32
Compat_version uint32
Lock uint32
Index uint32
Offset int64
Time_enabled uint64
Time_running uint64
Capabilities uint64
Pmc_width uint16
Time_shift uint16
Time_mult uint32
Time_offset uint64
Time_zero uint64
Size uint32
Data_head uint64
Data_tail uint64
Data_offset uint64
Data_size uint64
Aux_head uint64
Aux_tail uint64
Aux_offset uint64
Aux_size uint64
}
// SetNonblock is a wrapper
func SetNonblock(fd int, nonblocking bool) (err error) {
return errNonLinux
}
// Mmap is a wrapper
func Mmap(fd int, offset int64, length int, prot int, flags int) (data []byte, err error) {
return []byte{}, errNonLinux
}
// Munmap is a wrapper
func Munmap(b []byte) (err error) {
return errNonLinux
}
// PerfEventAttr is a wrapper
type PerfEventAttr struct {
Type uint32
Size uint32
Config uint64
Sample uint64
Sample_type uint64
Read_format uint64
Bits uint64
Wakeup uint32
Bp_type uint32
Ext1 uint64
Ext2 uint64
Branch_sample_type uint64
Sample_regs_user uint64
Sample_stack_user uint32
Clockid int32
Sample_regs_intr uint64
Aux_watermark uint32
Sample_max_stack uint16
}
// PerfEventOpen is a wrapper
func PerfEventOpen(attr *PerfEventAttr, pid int, cpu int, groupFd int, flags int) (fd int, err error) {
return 0, errNonLinux
}
// Utsname is a wrapper
type Utsname struct {
Release [65]byte
Version [65]byte
}
// Uname is a wrapper
func Uname(buf *Utsname) (err error) {
return errNonLinux
}
// Getpid is a wrapper
func Getpid() int {
return -1
}
// Gettid is a wrapper
func Gettid() int {
return -1
}
// Tgkill is a wrapper
func Tgkill(tgid int, tid int, sig syscall.Signal) (err error) {
return errNonLinux
}
// BytePtrFromString is a wrapper
func BytePtrFromString(s string) (*byte, error) {
return nil, errNonLinux
}
// ByteSliceToString is a wrapper
func ByteSliceToString(s []byte) string {
return ""
}
// Renameat2 is a wrapper
func Renameat2(olddirfd int, oldpath string, newdirfd int, newpath string, flags uint) error {
return errNonLinux
}
func KernelRelease() (string, error) {
return "", errNonLinux
}
func Prlimit(pid, resource int, new, old *Rlimit) error {
return errNonLinux
}

163
vendor/github.com/cilium/ebpf/internal/version.go generated vendored Normal file

@ -0,0 +1,163 @@
package internal
import (
"fmt"
"os"
"regexp"
"sync"
"github.com/cilium/ebpf/internal/unix"
)
const (
// Version constant used in ELF binaries indicating that the loader needs to
// substitute the eBPF program's version with the value of the kernel's
// KERNEL_VERSION compile-time macro. Used for compatibility with BCC, gobpf
// and RedSift.
MagicKernelVersion = 0xFFFFFFFE
)
var (
// Match between one and three decimals separated by dots, with the last
// segment (patch level) being optional on some kernels.
// The x.y.z string must appear at the start of a string or right after
// whitespace to prevent sequences like 'x.y.z-a.b.c' from matching 'a.b.c'.
rgxKernelVersion = regexp.MustCompile(`(?:\A|\s)\d{1,3}\.\d{1,3}(?:\.\d{1,3})?`)
kernelVersion = struct {
once sync.Once
version Version
err error
}{}
)
// A Version in the form Major.Minor.Patch.
type Version [3]uint16
// NewVersion creates a version from a string like "Major.Minor.Patch".
//
// Patch is optional.
func NewVersion(ver string) (Version, error) {
var major, minor, patch uint16
n, _ := fmt.Sscanf(ver, "%d.%d.%d", &major, &minor, &patch)
if n < 2 {
return Version{}, fmt.Errorf("invalid version: %s", ver)
}
return Version{major, minor, patch}, nil
}
func (v Version) String() string {
if v[2] == 0 {
return fmt.Sprintf("v%d.%d", v[0], v[1])
}
return fmt.Sprintf("v%d.%d.%d", v[0], v[1], v[2])
}
// Less returns true if the version is less than another version.
func (v Version) Less(other Version) bool {
for i, a := range v {
if a == other[i] {
continue
}
return a < other[i]
}
return false
}
// Unspecified returns true if the version is all zero.
func (v Version) Unspecified() bool {
return v[0] == 0 && v[1] == 0 && v[2] == 0
}
// Kernel implements the kernel's KERNEL_VERSION macro from linux/version.h.
// It represents the kernel version and patch level as a single value.
func (v Version) Kernel() uint32 {
// Kernels 4.4 and 4.9 have their SUBLEVEL clamped to 255 to avoid
// overflowing into PATCHLEVEL.
// See kernel commit 9b82f13e7ef3 ("kbuild: clamp SUBLEVEL to 255").
s := v[2]
if s > 255 {
s = 255
}
// Truncate members to uint8 to prevent them from spilling over into
// each other when overflowing 8 bits.
return uint32(uint8(v[0]))<<16 | uint32(uint8(v[1]))<<8 | uint32(uint8(s))
}
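As a worked example (illustration only, not part of the vendored file), the packing mirrors KERNEL_VERSION(a, b, c) = (a << 16) + (b << 8) + (c):

// Hypothetical values:
//   Version{5, 4, 20}.Kernel()  == 5<<16 | 4<<8 | 20  == 0x050414
//   Version{4, 4, 302}.Kernel() == 4<<16 | 4<<8 | 255 // SUBLEVEL clamped to 255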
// KernelVersion returns the version of the currently running kernel.
func KernelVersion() (Version, error) {
kernelVersion.once.Do(func() {
kernelVersion.version, kernelVersion.err = detectKernelVersion()
})
if kernelVersion.err != nil {
return Version{}, kernelVersion.err
}
return kernelVersion.version, nil
}
// detectKernelVersion returns the version of the running kernel. It scans the
// following sources in order: /proc/version_signature, uname -v, uname -r.
// In each of those locations, the last-appearing x.y(.z) value is selected
// for parsing. The first location that yields a usable version number is
// returned.
func detectKernelVersion() (Version, error) {
// Try reading /proc/version_signature for Ubuntu compatibility.
// Example format: Ubuntu 4.15.0-91.92-generic 4.15.18
// This method exists in the kernel itself, see d18acd15c
// ("perf tools: Fix kernel version error in ubuntu").
if pvs, err := os.ReadFile("/proc/version_signature"); err == nil {
// If /proc/version_signature exists, failing to parse it is an error.
// It only exists on Ubuntu, where the real patch level is not obtainable
// through any other method.
v, err := findKernelVersion(string(pvs))
if err != nil {
return Version{}, err
}
return v, nil
}
var uname unix.Utsname
if err := unix.Uname(&uname); err != nil {
return Version{}, fmt.Errorf("calling uname: %w", err)
}
// Debian puts the version including the patch level in uname.Version.
// It is not an error if there's no version number in uname.Version,
// as most distributions don't use it. Parsing can continue on uname.Release.
// Example format: #1 SMP Debian 4.19.37-5+deb10u2 (2019-08-08)
if v, err := findKernelVersion(unix.ByteSliceToString(uname.Version[:])); err == nil {
return v, nil
}
// Most other distributions have the full kernel version including patch
// level in uname.Release.
// Example format: 4.19.0-5-amd64, 5.5.10-arch1-1
v, err := findKernelVersion(unix.ByteSliceToString(uname.Release[:]))
if err != nil {
return Version{}, err
}
return v, nil
}
// findKernelVersion matches s against rgxKernelVersion and parses the result
// into a Version. If s contains multiple matches, the last entry is selected.
func findKernelVersion(s string) (Version, error) {
m := rgxKernelVersion.FindAllString(s, -1)
if m == nil {
return Version{}, fmt.Errorf("no kernel version in string: %s", s)
}
// Pick the last match of the string in case there are multiple.
s = m[len(m)-1]
v, err := NewVersion(s)
if err != nil {
return Version{}, fmt.Errorf("parsing version string %s: %w", s, err)
}
return v, nil
}
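For illustration only (not part of the vendored file), reusing the example strings from the comments in detectKernelVersion above:

// Hypothetical inputs and outputs:
//   findKernelVersion("Ubuntu 4.15.0-91.92-generic 4.15.18")          -> 4.15.18 (last match wins)
//   findKernelVersion("#1 SMP Debian 4.19.37-5+deb10u2 (2019-08-08)") -> 4.19.37
//   findKernelVersion("no version here")                              -> error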

171
vendor/github.com/cilium/ebpf/link/cgroup.go generated vendored Normal file

@ -0,0 +1,171 @@
package link
import (
"errors"
"fmt"
"os"
"github.com/cilium/ebpf"
)
type cgroupAttachFlags uint32
// cgroup attach flags
const (
flagAllowOverride cgroupAttachFlags = 1 << iota
flagAllowMulti
flagReplace
)
type CgroupOptions struct {
// Path to a cgroupv2 folder.
Path string
// One of the AttachCgroup* constants
Attach ebpf.AttachType
// Program must be of type CGroup*, and the attach type must match Attach.
Program *ebpf.Program
}
// AttachCgroup links a BPF program to a cgroup.
func AttachCgroup(opts CgroupOptions) (Link, error) {
cgroup, err := os.Open(opts.Path)
if err != nil {
return nil, fmt.Errorf("can't open cgroup: %s", err)
}
clone, err := opts.Program.Clone()
if err != nil {
cgroup.Close()
return nil, err
}
var cg Link
cg, err = newLinkCgroup(cgroup, opts.Attach, clone)
if errors.Is(err, ErrNotSupported) {
cg, err = newProgAttachCgroup(cgroup, opts.Attach, clone, flagAllowMulti)
}
if errors.Is(err, ErrNotSupported) {
cg, err = newProgAttachCgroup(cgroup, opts.Attach, clone, flagAllowOverride)
}
if err != nil {
cgroup.Close()
clone.Close()
return nil, err
}
return cg, nil
}
// LoadPinnedCgroup loads a pinned cgroup from a bpffs.
func LoadPinnedCgroup(fileName string, opts *ebpf.LoadPinOptions) (Link, error) {
link, err := LoadPinnedRawLink(fileName, CgroupType, opts)
if err != nil {
return nil, err
}
return &linkCgroup{*link}, nil
}
type progAttachCgroup struct {
cgroup *os.File
current *ebpf.Program
attachType ebpf.AttachType
flags cgroupAttachFlags
}
var _ Link = (*progAttachCgroup)(nil)
func (cg *progAttachCgroup) isLink() {}
func newProgAttachCgroup(cgroup *os.File, attach ebpf.AttachType, prog *ebpf.Program, flags cgroupAttachFlags) (*progAttachCgroup, error) {
if flags&flagAllowMulti > 0 {
if err := haveProgAttachReplace(); err != nil {
return nil, fmt.Errorf("can't support multiple programs: %w", err)
}
}
err := RawAttachProgram(RawAttachProgramOptions{
Target: int(cgroup.Fd()),
Program: prog,
Flags: uint32(flags),
Attach: attach,
})
if err != nil {
return nil, fmt.Errorf("cgroup: %w", err)
}
return &progAttachCgroup{cgroup, prog, attach, flags}, nil
}
func (cg *progAttachCgroup) Close() error {
defer cg.cgroup.Close()
defer cg.current.Close()
err := RawDetachProgram(RawDetachProgramOptions{
Target: int(cg.cgroup.Fd()),
Program: cg.current,
Attach: cg.attachType,
})
if err != nil {
return fmt.Errorf("close cgroup: %s", err)
}
return nil
}
func (cg *progAttachCgroup) Update(prog *ebpf.Program) error {
new, err := prog.Clone()
if err != nil {
return err
}
args := RawAttachProgramOptions{
Target: int(cg.cgroup.Fd()),
Program: prog,
Attach: cg.attachType,
Flags: uint32(cg.flags),
}
if cg.flags&flagAllowMulti > 0 {
// Atomically replacing multiple programs requires at least
// 5.5 (commit 7dd68b3279f17921 "bpf: Support replacing cgroup-bpf
// program in MULTI mode")
args.Flags |= uint32(flagReplace)
args.Replace = cg.current
}
if err := RawAttachProgram(args); err != nil {
new.Close()
return fmt.Errorf("can't update cgroup: %s", err)
}
cg.current.Close()
cg.current = new
return nil
}
func (cg *progAttachCgroup) Pin(string) error {
return fmt.Errorf("can't pin cgroup: %w", ErrNotSupported)
}
func (cg *progAttachCgroup) Unpin() error {
return fmt.Errorf("can't pin cgroup: %w", ErrNotSupported)
}
type linkCgroup struct {
RawLink
}
var _ Link = (*linkCgroup)(nil)
func newLinkCgroup(cgroup *os.File, attach ebpf.AttachType, prog *ebpf.Program) (*linkCgroup, error) {
link, err := AttachRawLink(RawLinkOptions{
Target: int(cgroup.Fd()),
Program: prog,
Attach: attach,
})
if err != nil {
return nil, err
}
return &linkCgroup{*link}, err
}
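A hedged, in-package sketch (not part of the vendored file) of attaching a cgroup program with the API above. The cgroup path and attach type are assumptions for illustration, prog stands for an already-loaded *ebpf.Program of a matching CGroup type, and Close is assumed to come from the Link implementations above:

// Hypothetical usage; path and constant are illustrative only.
l, err := AttachCgroup(CgroupOptions{
    Path:    "/sys/fs/cgroup/my-service",  // assumed cgroupv2 directory
    Attach:  ebpf.AttachCGroupInetIngress, // assumed constant; must match prog
    Program: prog,
})
if err != nil {
    // handle the error
}
defer l.Close()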

2
vendor/github.com/cilium/ebpf/link/doc.go generated vendored Normal file

@ -0,0 +1,2 @@
// Package link allows attaching eBPF programs to various kernel hooks.
package link

88
vendor/github.com/cilium/ebpf/link/freplace.go generated vendored Normal file

@ -0,0 +1,88 @@
package link
import (
"fmt"
"github.com/cilium/ebpf"
"github.com/cilium/ebpf/internal/btf"
)
type FreplaceLink struct {
RawLink
}
// AttachFreplace attaches the given eBPF program to the function it replaces.
//
// The program and name can either be provided at link time, or can be provided
// at program load time. If they were provided at load time, they should be nil
// and empty respectively here, as they will be ignored by the kernel.
// Examples:
//
// AttachFreplace(dispatcher, "function", replacement)
// AttachFreplace(nil, "", replacement)
func AttachFreplace(targetProg *ebpf.Program, name string, prog *ebpf.Program) (*FreplaceLink, error) {
if (name == "") != (targetProg == nil) {
return nil, fmt.Errorf("must provide both or neither of name and targetProg: %w", errInvalidInput)
}
if prog == nil {
return nil, fmt.Errorf("prog cannot be nil: %w", errInvalidInput)
}
if prog.Type() != ebpf.Extension {
return nil, fmt.Errorf("eBPF program type %s is not an Extension: %w", prog.Type(), errInvalidInput)
}
var (
target int
typeID btf.TypeID
)
if targetProg != nil {
info, err := targetProg.Info()
if err != nil {
return nil, err
}
btfID, ok := info.BTFID()
if !ok {
return nil, fmt.Errorf("could not get BTF ID for program %s: %w", info.Name, errInvalidInput)
}
btfHandle, err := btf.NewHandleFromID(btfID)
if err != nil {
return nil, err
}
defer btfHandle.Close()
var function *btf.Func
if err := btfHandle.Spec().FindType(name, &function); err != nil {
return nil, err
}
target = targetProg.FD()
typeID = function.ID()
}
link, err := AttachRawLink(RawLinkOptions{
Target: target,
Program: prog,
Attach: ebpf.AttachNone,
BTF: typeID,
})
if err != nil {
return nil, err
}
return &FreplaceLink{*link}, nil
}
// Update implements the Link interface.
func (f *FreplaceLink) Update(new *ebpf.Program) error {
return fmt.Errorf("freplace update: %w", ErrNotSupported)
}
// LoadPinnedFreplace loads a pinned freplace link from a bpffs.
func LoadPinnedFreplace(fileName string, opts *ebpf.LoadPinOptions) (*FreplaceLink, error) {
link, err := LoadPinnedRawLink(fileName, TracingType, opts)
if err != nil {
return nil, err
}
return &FreplaceLink{*link}, err
}

100
vendor/github.com/cilium/ebpf/link/iter.go generated vendored Normal file

@ -0,0 +1,100 @@
package link
import (
"fmt"
"io"
"unsafe"
"github.com/cilium/ebpf"
"github.com/cilium/ebpf/internal"
)
type IterOptions struct {
// Program must be of type Tracing with attach type
// AttachTraceIter. The kind of iterator to attach to is
// determined at load time via the AttachTo field.
//
// AttachTo requires the kernel to include BTF of itself,
// and to be compiled with a recent pahole (>= 1.16).
Program *ebpf.Program
// Map specifies the target map for bpf_map_elem and sockmap iterators.
// It may be nil.
Map *ebpf.Map
}
// AttachIter attaches a BPF seq_file iterator.
func AttachIter(opts IterOptions) (*Iter, error) {
if err := haveBPFLink(); err != nil {
return nil, err
}
progFd := opts.Program.FD()
if progFd < 0 {
return nil, fmt.Errorf("invalid program: %s", internal.ErrClosedFd)
}
var info bpfIterLinkInfoMap
if opts.Map != nil {
mapFd := opts.Map.FD()
if mapFd < 0 {
return nil, fmt.Errorf("invalid map: %w", internal.ErrClosedFd)
}
info.map_fd = uint32(mapFd)
}
attr := bpfLinkCreateIterAttr{
prog_fd: uint32(progFd),
attach_type: ebpf.AttachTraceIter,
iter_info: internal.NewPointer(unsafe.Pointer(&info)),
iter_info_len: uint32(unsafe.Sizeof(info)),
}
fd, err := bpfLinkCreateIter(&attr)
if err != nil {
return nil, fmt.Errorf("can't link iterator: %w", err)
}
return &Iter{RawLink{fd, ""}}, err
}
// LoadPinnedIter loads a pinned iterator from a bpffs.
func LoadPinnedIter(fileName string, opts *ebpf.LoadPinOptions) (*Iter, error) {
link, err := LoadPinnedRawLink(fileName, IterType, opts)
if err != nil {
return nil, err
}
return &Iter{*link}, err
}
// Iter represents an attached bpf_iter.
type Iter struct {
RawLink
}
// Open creates a new instance of the iterator.
//
// Reading from the returned reader triggers the BPF program.
func (it *Iter) Open() (io.ReadCloser, error) {
linkFd, err := it.fd.Value()
if err != nil {
return nil, err
}
attr := &bpfIterCreateAttr{
linkFd: linkFd,
}
fd, err := bpfIterCreate(attr)
if err != nil {
return nil, fmt.Errorf("can't create iterator: %w", err)
}
return fd.File("bpf_iter"), nil
}
// union bpf_iter_link_info.map
type bpfIterLinkInfoMap struct {
map_fd uint32
}
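A hedged, in-package sketch (not part of the vendored file) of driving an iterator end to end. prog is assumed to be a loaded Tracing program with attach type AttachTraceIter, and the io/os imports as well as RawLink.Close are assumed from elsewhere in the package:

// Hypothetical usage, for illustration only.
it, err := AttachIter(IterOptions{Program: prog})
if err != nil {
    // handle the error
}
defer it.Close()

r, err := it.Open()
if err != nil {
    // handle the error
}
defer r.Close()

// Reading triggers the BPF program; stream its output somewhere.
io.Copy(os.Stdout, r)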

444
vendor/github.com/cilium/ebpf/link/kprobe.go generated vendored Normal file

@ -0,0 +1,444 @@
package link
import (
"bytes"
"crypto/rand"
"errors"
"fmt"
"os"
"path/filepath"
"runtime"
"sync"
"unsafe"
"github.com/cilium/ebpf"
"github.com/cilium/ebpf/internal"
"github.com/cilium/ebpf/internal/unix"
)
var (
kprobeEventsPath = filepath.Join(tracefsPath, "kprobe_events")
kprobeRetprobeBit = struct {
once sync.Once
value uint64
err error
}{}
)
type probeType uint8
const (
kprobeType probeType = iota
uprobeType
)
func (pt probeType) String() string {
if pt == kprobeType {
return "kprobe"
}
return "uprobe"
}
func (pt probeType) EventsPath() string {
if pt == kprobeType {
return kprobeEventsPath
}
return uprobeEventsPath
}
func (pt probeType) PerfEventType(ret bool) perfEventType {
if pt == kprobeType {
if ret {
return kretprobeEvent
}
return kprobeEvent
}
if ret {
return uretprobeEvent
}
return uprobeEvent
}
func (pt probeType) RetprobeBit() (uint64, error) {
if pt == kprobeType {
return kretprobeBit()
}
return uretprobeBit()
}
// Kprobe attaches the given eBPF program to a perf event that fires when the
// given kernel symbol starts executing. See /proc/kallsyms for available
// symbols. For example, printk():
//
// kp, err := Kprobe("printk", prog)
//
// Losing the reference to the resulting Link (kp) will close the Kprobe
// and prevent further execution of prog. The Link must be Closed during
// program shutdown to avoid leaking system resources.
func Kprobe(symbol string, prog *ebpf.Program) (Link, error) {
k, err := kprobe(symbol, prog, false)
if err != nil {
return nil, err
}
err = k.attach(prog)
if err != nil {
k.Close()
return nil, err
}
return k, nil
}
// Kretprobe attaches the given eBPF program to a perf event that fires right
// before the given kernel symbol exits, with the function stack left intact.
// See /proc/kallsyms for available symbols. For example, printk():
//
// kp, err := Kretprobe("printk", prog)
//
// Losing the reference to the resulting Link (kp) will close the Kretprobe
// and prevent further execution of prog. The Link must be Closed during
// program shutdown to avoid leaking system resources.
func Kretprobe(symbol string, prog *ebpf.Program) (Link, error) {
k, err := kprobe(symbol, prog, true)
if err != nil {
return nil, err
}
err = k.attach(prog)
if err != nil {
k.Close()
return nil, err
}
return k, nil
}
// kprobe opens a perf event on the given symbol and attaches prog to it.
// If ret is true, create a kretprobe.
func kprobe(symbol string, prog *ebpf.Program, ret bool) (*perfEvent, error) {
if symbol == "" {
return nil, fmt.Errorf("symbol name cannot be empty: %w", errInvalidInput)
}
if prog == nil {
return nil, fmt.Errorf("prog cannot be nil: %w", errInvalidInput)
}
if !rgxTraceEvent.MatchString(symbol) {
return nil, fmt.Errorf("symbol '%s' must be alphanumeric or underscore: %w", symbol, errInvalidInput)
}
if prog.Type() != ebpf.Kprobe {
return nil, fmt.Errorf("eBPF program type %s is not a Kprobe: %w", prog.Type(), errInvalidInput)
}
// Use kprobe PMU if the kernel has it available.
tp, err := pmuKprobe(platformPrefix(symbol), ret)
if errors.Is(err, os.ErrNotExist) {
tp, err = pmuKprobe(symbol, ret)
}
if err == nil {
return tp, nil
}
if err != nil && !errors.Is(err, ErrNotSupported) {
return nil, fmt.Errorf("creating perf_kprobe PMU: %w", err)
}
// Use tracefs if kprobe PMU is missing.
tp, err = tracefsKprobe(platformPrefix(symbol), ret)
if errors.Is(err, os.ErrNotExist) {
tp, err = tracefsKprobe(symbol, ret)
}
if err != nil {
return nil, fmt.Errorf("creating trace event '%s' in tracefs: %w", symbol, err)
}
return tp, nil
}
// pmuKprobe opens a perf event based on the kprobe PMU.
// Returns os.ErrNotExist if the given symbol does not exist in the kernel.
func pmuKprobe(symbol string, ret bool) (*perfEvent, error) {
return pmuProbe(kprobeType, symbol, "", 0, perfAllThreads, ret)
}
// pmuProbe opens a perf event based on a Performance Monitoring Unit.
//
// Requires at least a 4.17 kernel.
// e12f03d7031a "perf/core: Implement the 'perf_kprobe' PMU"
// 33ea4b24277b "perf/core: Implement the 'perf_uprobe' PMU"
//
// Returns ErrNotSupported if the kernel doesn't support perf_[k,u]probe PMU
func pmuProbe(typ probeType, symbol, path string, offset uint64, pid int, ret bool) (*perfEvent, error) {
// Getting the PMU type will fail if the kernel doesn't support
// the perf_[k,u]probe PMU.
et, err := getPMUEventType(typ)
if err != nil {
return nil, err
}
var config uint64
if ret {
bit, err := typ.RetprobeBit()
if err != nil {
return nil, err
}
config |= 1 << bit
}
var (
attr unix.PerfEventAttr
sp unsafe.Pointer
)
switch typ {
case kprobeType:
// Create a pointer to a NUL-terminated string for the kernel.
sp, err = unsafeStringPtr(symbol)
if err != nil {
return nil, err
}
attr = unix.PerfEventAttr{
Type: uint32(et), // PMU event type read from sysfs
Ext1: uint64(uintptr(sp)), // Kernel symbol to trace
Config: config, // Retprobe flag
}
case uprobeType:
sp, err = unsafeStringPtr(path)
if err != nil {
return nil, err
}
attr = unix.PerfEventAttr{
// The minimum size required for PMU uprobes is PERF_ATTR_SIZE_VER1,
// since it added the config2 (Ext2) field. The Size field controls the
// size of the internal buffer the kernel allocates for reading the
// perf_event_attr argument from userspace.
Size: unix.PERF_ATTR_SIZE_VER1,
Type: uint32(et), // PMU event type read from sysfs
Ext1: uint64(uintptr(sp)), // Uprobe path
Ext2: offset, // Uprobe offset
Config: config, // Retprobe flag
}
}
fd, err := unix.PerfEventOpen(&attr, pid, 0, -1, unix.PERF_FLAG_FD_CLOEXEC)
// Since commit 97c753e62e6c, ENOENT is correctly returned instead of EINVAL
// when trying to create a kretprobe for a missing symbol. Make sure ENOENT
// is returned to the caller.
if errors.Is(err, os.ErrNotExist) || errors.Is(err, unix.EINVAL) {
return nil, fmt.Errorf("symbol '%s' not found: %w", symbol, os.ErrNotExist)
}
// Since at least commit cb9a19fe4aa51, ENOTSUPP is returned
// when attempting to set a uprobe on a trap instruction.
if errors.Is(err, unix.ENOTSUPP) {
return nil, fmt.Errorf("failed setting uprobe on offset %#x (possible trap insn): %w", offset, err)
}
if err != nil {
return nil, fmt.Errorf("opening perf event: %w", err)
}
// Ensure the string pointer is not collected before PerfEventOpen returns.
runtime.KeepAlive(sp)
// Kernel has perf_[k,u]probe PMU available, initialize perf event.
return &perfEvent{
fd: internal.NewFD(uint32(fd)),
pmuID: et,
name: symbol,
typ: typ.PerfEventType(ret),
}, nil
}
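// Illustrative sketch, not part of this vendored file: the perf_kprobe PMU
// path used above, reduced to the standard library plus golang.org/x/sys/unix.
// The helper name openPMUKprobe is hypothetical and error handling is trimmed.

import (
	"bytes"
	"os"
	"runtime"
	"strconv"
	"unsafe"

	"golang.org/x/sys/unix"
)

func openPMUKprobe(symbol string) (int, error) {
	// The PMU's numeric event type lives in sysfs (see getPMUEventType in perf_event.go).
	raw, err := os.ReadFile("/sys/bus/event_source/devices/kprobe/type")
	if err != nil {
		return -1, err // kernel lacks the perf_kprobe PMU
	}
	et, err := strconv.ParseUint(string(bytes.TrimSpace(raw)), 10, 32)
	if err != nil {
		return -1, err
	}
	sp, err := unix.BytePtrFromString(symbol) // NUL-terminated copy for the kernel
	if err != nil {
		return -1, err
	}
	attr := unix.PerfEventAttr{
		Type: uint32(et),                          // PMU type read from sysfs
		Ext1: uint64(uintptr(unsafe.Pointer(sp))), // kernel symbol to trace
	}
	// pid -1, cpu 0 matches the perfAllThreads behaviour in perf_event.go.
	fd, err := unix.PerfEventOpen(&attr, -1, 0, -1, unix.PERF_FLAG_FD_CLOEXEC)
	runtime.KeepAlive(sp) // keep the symbol string alive across the syscall
	return fd, err
}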
// tracefsKprobe creates a Kprobe tracefs entry.
func tracefsKprobe(symbol string, ret bool) (*perfEvent, error) {
return tracefsProbe(kprobeType, symbol, "", 0, perfAllThreads, ret)
}
// tracefsProbe creates a trace event by writing an entry to <tracefs>/[k,u]probe_events.
// A new trace event group name is generated on every call to support creating
// multiple trace events for the same kernel or userspace symbol.
// Path and offset are only set in the case of uprobe(s) and are used to set
// the executable/library path on the filesystem and the offset where the probe is inserted.
// A perf event is then opened on the newly-created trace event and returned to the caller.
func tracefsProbe(typ probeType, symbol, path string, offset uint64, pid int, ret bool) (*perfEvent, error) {
// Generate a random string for each trace event we attempt to create.
// This value is used as the 'group' token in tracefs to allow creating
// multiple kprobe trace events with the same name.
group, err := randomGroup("ebpf")
if err != nil {
return nil, fmt.Errorf("randomizing group name: %w", err)
}
// Before attempting to create a trace event through tracefs,
// check if an event with the same group and name already exists.
// Kernels 4.x and earlier don't return os.ErrExist on writing a duplicate
// entry, so we need to rely on reads for detecting uniqueness.
_, err = getTraceEventID(group, symbol)
if err == nil {
return nil, fmt.Errorf("trace event already exists: %s/%s", group, symbol)
}
if err != nil && !errors.Is(err, os.ErrNotExist) {
return nil, fmt.Errorf("checking trace event %s/%s: %w", group, symbol, err)
}
// Create the [k,u]probe trace event using tracefs.
if err := createTraceFSProbeEvent(typ, group, symbol, path, offset, ret); err != nil {
return nil, fmt.Errorf("creating probe entry on tracefs: %w", err)
}
// Get the newly-created trace event's id.
tid, err := getTraceEventID(group, symbol)
if err != nil {
return nil, fmt.Errorf("getting trace event id: %w", err)
}
// Kprobes are ephemeral tracepoints and share the same perf event type.
fd, err := openTracepointPerfEvent(tid, pid)
if err != nil {
return nil, err
}
return &perfEvent{
fd: fd,
group: group,
name: symbol,
tracefsID: tid,
typ: typ.PerfEventType(ret),
}, nil
}
// createTraceFSProbeEvent creates a new ephemeral trace event by writing to
// <tracefs>/[k,u]probe_events. Returns os.ErrNotExist if symbol is not a valid
// kernel symbol, or if it is not traceable with kprobes. Returns os.ErrExist
// if a probe with the same group and symbol already exists.
func createTraceFSProbeEvent(typ probeType, group, symbol, path string, offset uint64, ret bool) error {
// Open the kprobe_events file in tracefs.
f, err := os.OpenFile(typ.EventsPath(), os.O_APPEND|os.O_WRONLY, 0666)
if err != nil {
return fmt.Errorf("error opening '%s': %w", typ.EventsPath(), err)
}
defer f.Close()
var pe string
switch typ {
case kprobeType:
// The kprobe_events syntax is as follows (see Documentation/trace/kprobetrace.txt):
// p[:[GRP/]EVENT] [MOD:]SYM[+offs]|MEMADDR [FETCHARGS] : Set a probe
// r[MAXACTIVE][:[GRP/]EVENT] [MOD:]SYM[+0] [FETCHARGS] : Set a return probe
// -:[GRP/]EVENT : Clear a probe
//
// Some examples:
// r:ebpf_1234/r_my_kretprobe nf_conntrack_destroy
// p:ebpf_5678/p_my_kprobe __x64_sys_execve
//
// Leaving the kretprobe's MAXACTIVE set to 0 (or absent) will make the
// kernel default to NR_CPUS. This is desired in most eBPF cases since
// subsampling or rate limiting logic can be more accurately implemented in
// the eBPF program itself.
// See Documentation/kprobes.txt for more details.
pe = fmt.Sprintf("%s:%s/%s %s", probePrefix(ret), group, symbol, symbol)
case uprobeType:
// The uprobe_events syntax is as follows:
// p[:[GRP/]EVENT] PATH:OFFSET [FETCHARGS] : Set a probe
// r[:[GRP/]EVENT] PATH:OFFSET [FETCHARGS] : Set a return probe
// -:[GRP/]EVENT : Clear a probe
//
// Some examples:
// r:ebpf_1234/readline /bin/bash:0x12345
// p:ebpf_5678/main_mySymbol /bin/mybin:0x12345
//
// See Documentation/trace/uprobetracer.txt for more details.
pathOffset := uprobePathOffset(path, offset)
pe = fmt.Sprintf("%s:%s/%s %s", probePrefix(ret), group, symbol, pathOffset)
}
_, err = f.WriteString(pe)
// Since commit 97c753e62e6c, ENOENT is correctly returned instead of EINVAL
// when trying to create a kretprobe for a missing symbol. Make sure ENOENT
// is returned to the caller.
if errors.Is(err, os.ErrNotExist) || errors.Is(err, unix.EINVAL) {
return fmt.Errorf("symbol %s not found: %w", symbol, os.ErrNotExist)
}
if err != nil {
return fmt.Errorf("writing '%s' to '%s': %w", pe, typ.EventsPath(), err)
}
return nil
}
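// Illustrative sketch, not part of this vendored file: creating and removing a
// system-wide kprobe trace event by hand, using only the standard library and
// the kprobe_events syntax documented above. The group name ebpf_cafe and the
// helper names are hypothetical examples.

import "os"

func writeKprobeEvents(entry string) error {
	f, err := os.OpenFile("/sys/kernel/debug/tracing/kprobe_events", os.O_APPEND|os.O_WRONLY, 0666)
	if err != nil {
		return err
	}
	defer f.Close()
	_, err = f.WriteString(entry)
	return err
}

func addAndRemoveKprobeEvent() error {
	// "p:" sets a kprobe; "r:" would set a kretprobe (see probePrefix below).
	if err := writeKprobeEvents("p:ebpf_cafe/sys_execve sys_execve"); err != nil {
		return err
	}
	// The event now appears under <tracefs>/events/ebpf_cafe/sys_execve/ and can
	// be opened as a perf event. A "-:" entry removes it again (see
	// closeTraceFSProbeEvent below).
	return writeKprobeEvents("-:ebpf_cafe/sys_execve")
}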
// closeTraceFSProbeEvent removes the [k,u]probe with the given type, group and symbol
// from <tracefs>/[k,u]probe_events.
func closeTraceFSProbeEvent(typ probeType, group, symbol string) error {
f, err := os.OpenFile(typ.EventsPath(), os.O_APPEND|os.O_WRONLY, 0666)
if err != nil {
return fmt.Errorf("error opening %s: %w", typ.EventsPath(), err)
}
defer f.Close()
// See [k,u]probe_events syntax above. The probe type does not need to be specified
// for removals.
pe := fmt.Sprintf("-:%s/%s", group, symbol)
if _, err = f.WriteString(pe); err != nil {
return fmt.Errorf("writing '%s' to '%s': %w", pe, typ.EventsPath(), err)
}
return nil
}
// randomGroup generates a pseudorandom string for use as a tracefs group name.
// Returns an error when the output string would exceed 63 characters (kernel
// limitation), when rand.Read() fails or when prefix contains characters not
// allowed by rgxTraceEvent.
func randomGroup(prefix string) (string, error) {
if !rgxTraceEvent.MatchString(prefix) {
return "", fmt.Errorf("prefix '%s' must be alphanumeric or underscore: %w", prefix, errInvalidInput)
}
b := make([]byte, 8)
if _, err := rand.Read(b); err != nil {
return "", fmt.Errorf("reading random bytes: %w", err)
}
group := fmt.Sprintf("%s_%x", prefix, b)
if len(group) > 63 {
return "", fmt.Errorf("group name '%s' cannot be longer than 63 characters: %w", group, errInvalidInput)
}
return group, nil
}
func probePrefix(ret bool) string {
if ret {
return "r"
}
return "p"
}
// determineRetprobeBit reads a Performance Monitoring Unit's retprobe bit
// from /sys/bus/event_source/devices/<pmu>/format/retprobe.
func determineRetprobeBit(typ probeType) (uint64, error) {
p := filepath.Join("/sys/bus/event_source/devices/", typ.String(), "/format/retprobe")
data, err := os.ReadFile(p)
if err != nil {
return 0, err
}
var rp uint64
n, err := fmt.Sscanf(string(bytes.TrimSpace(data)), "config:%d", &rp)
if err != nil {
return 0, fmt.Errorf("parse retprobe bit: %w", err)
}
if n != 1 {
return 0, fmt.Errorf("parse retprobe bit: expected 1 item, got %d", n)
}
return rp, nil
}
func kretprobeBit() (uint64, error) {
kprobeRetprobeBit.once.Do(func() {
kprobeRetprobeBit.value, kprobeRetprobeBit.err = determineRetprobeBit(kprobeType)
})
return kprobeRetprobeBit.value, kprobeRetprobeBit.err
}
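// Illustrative sketch, not part of this vendored file: how the pieces in this
// file compose, written as if it lived in package link. traceExecve is a
// hypothetical helper; it assumes prog is a loaded program of type ebpf.Kprobe
// and that the unexported kprobe helper shown above has the signature
// kprobe(symbol string, prog *ebpf.Program, ret bool) (*perfEvent, error).

func traceExecve(prog *ebpf.Program) (*perfEvent, error) {
	// kprobe tries the perf_kprobe PMU first and falls back to tracefs,
	// preferring an architecture-prefixed symbol (see platformPrefix).
	tp, err := kprobe("sys_execve", prog, false) // false: entry probe, true: kretprobe
	if err != nil {
		return nil, err
	}
	// attach (perf_event.go) wires prog to the perf event via
	// PERF_EVENT_IOC_SET_BPF and enables it.
	if err := tp.attach(prog); err != nil {
		tp.Close()
		return nil, err
	}
	return tp, nil
}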

233
vendor/github.com/cilium/ebpf/link/link.go generated vendored Normal file

@ -0,0 +1,233 @@
package link
import (
"fmt"
"unsafe"
"github.com/cilium/ebpf"
"github.com/cilium/ebpf/internal"
"github.com/cilium/ebpf/internal/btf"
)
var ErrNotSupported = internal.ErrNotSupported
// Link represents a Program attached to a BPF hook.
type Link interface {
// Replace the current program with a new program.
//
// Passing a nil program is an error. May return an error wrapping ErrNotSupported.
Update(*ebpf.Program) error
// Persist a link by pinning it into a bpffs.
//
// May return an error wrapping ErrNotSupported.
Pin(string) error
// Undo a previous call to Pin.
//
// May return an error wrapping ErrNotSupported.
Unpin() error
// Close frees resources.
//
// The link will be broken unless it has been successfully pinned.
// A link may continue past the lifetime of the process if Close is
// not called.
Close() error
// Prevent external users from implementing this interface.
isLink()
}
// ID uniquely identifies a BPF link.
type ID uint32
// RawLinkOptions control the creation of a raw link.
type RawLinkOptions struct {
// File descriptor to attach to. This differs for each attach type.
Target int
// Program to attach.
Program *ebpf.Program
// Attach must match the attach type of Program.
Attach ebpf.AttachType
// BTF is the BTF of the attachment target.
BTF btf.TypeID
}
// RawLinkInfo contains metadata on a link.
type RawLinkInfo struct {
Type Type
ID ID
Program ebpf.ProgramID
}
// RawLink is the low-level API to bpf_link.
//
// You should consider using the higher level interfaces in this
// package instead.
type RawLink struct {
fd *internal.FD
pinnedPath string
}
// AttachRawLink creates a raw link.
func AttachRawLink(opts RawLinkOptions) (*RawLink, error) {
if err := haveBPFLink(); err != nil {
return nil, err
}
if opts.Target < 0 {
return nil, fmt.Errorf("invalid target: %s", internal.ErrClosedFd)
}
progFd := opts.Program.FD()
if progFd < 0 {
return nil, fmt.Errorf("invalid program: %s", internal.ErrClosedFd)
}
attr := bpfLinkCreateAttr{
targetFd: uint32(opts.Target),
progFd: uint32(progFd),
attachType: opts.Attach,
targetBTFID: uint32(opts.BTF),
}
fd, err := bpfLinkCreate(&attr)
if err != nil {
return nil, fmt.Errorf("can't create link: %s", err)
}
return &RawLink{fd, ""}, nil
}
// LoadPinnedRawLink loads a persisted link from a bpffs.
//
// Returns an error if the pinned link type doesn't match linkType. Pass
// UnspecifiedType to disable this behaviour.
func LoadPinnedRawLink(fileName string, linkType Type, opts *ebpf.LoadPinOptions) (*RawLink, error) {
fd, err := internal.BPFObjGet(fileName, opts.Marshal())
if err != nil {
return nil, fmt.Errorf("load pinned link: %w", err)
}
link := &RawLink{fd, fileName}
if linkType == UnspecifiedType {
return link, nil
}
info, err := link.Info()
if err != nil {
link.Close()
return nil, fmt.Errorf("get pinned link info: %s", err)
}
if info.Type != linkType {
link.Close()
return nil, fmt.Errorf("link type %v doesn't match %v", info.Type, linkType)
}
return link, nil
}
func (l *RawLink) isLink() {}
// FD returns the raw file descriptor.
func (l *RawLink) FD() int {
fd, err := l.fd.Value()
if err != nil {
return -1
}
return int(fd)
}
// Close breaks the link.
//
// Use Pin if you want to make the link persistent.
func (l *RawLink) Close() error {
return l.fd.Close()
}
// Pin persists a link past the lifetime of the process.
//
// Calling Close on a pinned Link will not break the link
// until the pin is removed.
func (l *RawLink) Pin(fileName string) error {
if err := internal.Pin(l.pinnedPath, fileName, l.fd); err != nil {
return err
}
l.pinnedPath = fileName
return nil
}
// Unpin implements the Link interface.
func (l *RawLink) Unpin() error {
if err := internal.Unpin(l.pinnedPath); err != nil {
return err
}
l.pinnedPath = ""
return nil
}
// Update implements the Link interface.
func (l *RawLink) Update(new *ebpf.Program) error {
return l.UpdateArgs(RawLinkUpdateOptions{
New: new,
})
}
// RawLinkUpdateOptions control the behaviour of RawLink.UpdateArgs.
type RawLinkUpdateOptions struct {
New *ebpf.Program
Old *ebpf.Program
Flags uint32
}
// UpdateArgs updates a link based on args.
func (l *RawLink) UpdateArgs(opts RawLinkUpdateOptions) error {
newFd := opts.New.FD()
if newFd < 0 {
return fmt.Errorf("invalid program: %s", internal.ErrClosedFd)
}
var oldFd int
if opts.Old != nil {
oldFd = opts.Old.FD()
if oldFd < 0 {
return fmt.Errorf("invalid replacement program: %s", internal.ErrClosedFd)
}
}
linkFd, err := l.fd.Value()
if err != nil {
return fmt.Errorf("can't update link: %s", err)
}
attr := bpfLinkUpdateAttr{
linkFd: linkFd,
newProgFd: uint32(newFd),
oldProgFd: uint32(oldFd),
flags: opts.Flags,
}
return bpfLinkUpdate(&attr)
}
// struct bpf_link_info
type bpfLinkInfo struct {
typ uint32
id uint32
prog_id uint32
}
// Info returns metadata about the link.
func (l *RawLink) Info() (*RawLinkInfo, error) {
var info bpfLinkInfo
err := internal.BPFObjGetInfoByFD(l.fd, unsafe.Pointer(&info), unsafe.Sizeof(info))
if err != nil {
return nil, fmt.Errorf("link info: %s", err)
}
return &RawLinkInfo{
Type(info.typ),
ID(info.id),
ebpf.ProgramID(info.prog_id),
}, nil
}
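// Illustrative sketch, not part of this vendored file: pinning a RawLink into
// a bpffs and loading it back later. It assumes a bpffs mounted at /sys/fs/bpf,
// that lnk was obtained from AttachRawLink, and uses a hypothetical pin path.

import (
	"github.com/cilium/ebpf"
	"github.com/cilium/ebpf/link"
)

func pinAndReload(lnk *link.RawLink) (*link.RawLink, error) {
	const path = "/sys/fs/bpf/my_link" // hypothetical pin location
	if err := lnk.Pin(path); err != nil {
		return nil, err
	}
	// Closing a pinned link does not break it; the pin keeps it alive.
	if err := lnk.Close(); err != nil {
		return nil, err
	}
	// UnspecifiedType skips the link-type check described above.
	return link.LoadPinnedRawLink(path, link.UnspecifiedType, &ebpf.LoadPinOptions{})
}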

60
vendor/github.com/cilium/ebpf/link/netns.go generated vendored Normal file

@ -0,0 +1,60 @@
package link
import (
"fmt"
"github.com/cilium/ebpf"
)
// NetNsInfo contains metadata about a network namespace link.
type NetNsInfo struct {
RawLinkInfo
}
// NetNsLink is a program attached to a network namespace.
type NetNsLink struct {
*RawLink
}
// AttachNetNs attaches a program to a network namespace.
func AttachNetNs(ns int, prog *ebpf.Program) (*NetNsLink, error) {
var attach ebpf.AttachType
switch t := prog.Type(); t {
case ebpf.FlowDissector:
attach = ebpf.AttachFlowDissector
case ebpf.SkLookup:
attach = ebpf.AttachSkLookup
default:
return nil, fmt.Errorf("can't attach %v to network namespace", t)
}
link, err := AttachRawLink(RawLinkOptions{
Target: ns,
Program: prog,
Attach: attach,
})
if err != nil {
return nil, err
}
return &NetNsLink{link}, nil
}
// LoadPinnedNetNs loads a network namespace link from bpffs.
func LoadPinnedNetNs(fileName string, opts *ebpf.LoadPinOptions) (*NetNsLink, error) {
link, err := LoadPinnedRawLink(fileName, NetNsType, opts)
if err != nil {
return nil, err
}
return &NetNsLink{link}, nil
}
// Info returns information about the link.
func (nns *NetNsLink) Info() (*NetNsInfo, error) {
info, err := nns.RawLink.Info()
if err != nil {
return nil, err
}
return &NetNsInfo{*info}, nil
}
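// Illustrative sketch, not part of this vendored file: attaching a flow
// dissector program to the current network namespace. attachToCurrentNetNs is
// hypothetical and assumes prog is a loaded *ebpf.Program of type
// ebpf.FlowDissector.

import (
	"os"

	"github.com/cilium/ebpf"
	"github.com/cilium/ebpf/link"
)

func attachToCurrentNetNs(prog *ebpf.Program) (*link.NetNsLink, error) {
	// The attach target is a file descriptor referring to a network namespace.
	ns, err := os.Open("/proc/self/ns/net")
	if err != nil {
		return nil, err
	}
	defer ns.Close()
	return link.AttachNetNs(int(ns.Fd()), prog)
}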

272
vendor/github.com/cilium/ebpf/link/perf_event.go generated vendored Normal file

@ -0,0 +1,272 @@
package link
import (
"bytes"
"errors"
"fmt"
"os"
"path/filepath"
"regexp"
"runtime"
"strconv"
"strings"
"unsafe"
"github.com/cilium/ebpf"
"github.com/cilium/ebpf/internal"
"github.com/cilium/ebpf/internal/unix"
)
// Getting the terminology right is usually the hardest part. For posterity and
// for staying sane during implementation:
//
// - trace event: Representation of a kernel runtime hook. Filesystem entries
// under <tracefs>/events. Can be tracepoints (static), kprobes or uprobes.
// Can be instantiated into perf events (see below).
// - tracepoint: A predetermined hook point in the kernel. Exposed as trace
// events in (sub)directories under <tracefs>/events. Cannot be closed or
// removed, they are static.
// - k(ret)probe: Ephemeral trace events based on entry or exit points of
// exported kernel symbols. kprobe-based (tracefs) trace events can be
// created system-wide by writing to the <tracefs>/kprobe_events file, or
// they can be scoped to the current process by creating PMU perf events.
// - u(ret)probe: Ephemeral trace events based on user-provided ELF binaries
// and offsets. uprobe-based (tracefs) trace events can be
// created system-wide by writing to the <tracefs>/uprobe_events file, or
// they can be scoped to the current process by creating PMU perf events.
// - perf event: An object instantiated based on an existing trace event or
// kernel symbol. Referred to by fd in userspace.
// Exactly one eBPF program can be attached to a perf event. Multiple perf
// events can be created from a single trace event. Closing a perf event
// stops any further invocations of the attached eBPF program.
var (
tracefsPath = "/sys/kernel/debug/tracing"
// Trace event groups, names and kernel symbols must adhere to this set
// of characters. Non-empty, first character must not be a number, all
// characters must be alphanumeric or underscore.
rgxTraceEvent = regexp.MustCompile("^[a-zA-Z_][0-9a-zA-Z_]*$")
errInvalidInput = errors.New("invalid input")
)
const (
perfAllThreads = -1
)
type perfEventType uint8
const (
tracepointEvent perfEventType = iota
kprobeEvent
kretprobeEvent
uprobeEvent
uretprobeEvent
)
// A perfEvent represents a perf event kernel object. Exactly one eBPF program
// can be attached to it. It is created based on a tracefs trace event or a
// Performance Monitoring Unit (PMU).
type perfEvent struct {
// Group and name of the tracepoint/kprobe/uprobe.
group string
name string
// PMU event ID read from sysfs. Valid IDs are non-zero.
pmuID uint64
// ID of the trace event read from tracefs. Valid IDs are non-zero.
tracefsID uint64
// The event type determines the types of programs that can be attached.
typ perfEventType
fd *internal.FD
}
func (pe *perfEvent) isLink() {}
func (pe *perfEvent) Pin(string) error {
return fmt.Errorf("pin perf event: %w", ErrNotSupported)
}
func (pe *perfEvent) Unpin() error {
return fmt.Errorf("unpin perf event: %w", ErrNotSupported)
}
// Since 4.15 (e87c6bc3852b "bpf: permit multiple bpf attachments for a single perf event"),
// calling PERF_EVENT_IOC_SET_BPF appends the given program to a prog_array
// owned by the perf event, which means multiple programs can be attached
// simultaneously.
//
// Before 4.15, calling PERF_EVENT_IOC_SET_BPF more than once on a perf event
// returns EEXIST.
//
// Detaching a program from a perf event is currently not possible, so a
// program replacement mechanism cannot be implemented for perf events.
func (pe *perfEvent) Update(prog *ebpf.Program) error {
return fmt.Errorf("can't replace eBPF program in perf event: %w", ErrNotSupported)
}
func (pe *perfEvent) Close() error {
if pe.fd == nil {
return nil
}
pfd, err := pe.fd.Value()
if err != nil {
return fmt.Errorf("getting perf event fd: %w", err)
}
err = unix.IoctlSetInt(int(pfd), unix.PERF_EVENT_IOC_DISABLE, 0)
if err != nil {
return fmt.Errorf("disabling perf event: %w", err)
}
err = pe.fd.Close()
if err != nil {
return fmt.Errorf("closing perf event fd: %w", err)
}
switch pe.typ {
case kprobeEvent, kretprobeEvent:
// Clean up kprobe tracefs entry.
if pe.tracefsID != 0 {
return closeTraceFSProbeEvent(kprobeType, pe.group, pe.name)
}
case uprobeEvent, uretprobeEvent:
// Clean up uprobe tracefs entry.
if pe.tracefsID != 0 {
return closeTraceFSProbeEvent(uprobeType, pe.group, pe.name)
}
case tracepointEvent:
// Tracepoint trace events don't hold any extra resources.
return nil
}
return nil
}
// attach the given eBPF prog to the perf event stored in pe.
// pe must contain a valid perf event fd.
// prog's type must match the program type stored in pe.
func (pe *perfEvent) attach(prog *ebpf.Program) error {
if prog == nil {
return errors.New("cannot attach a nil program")
}
if pe.fd == nil {
return errors.New("cannot attach to nil perf event")
}
if prog.FD() < 0 {
return fmt.Errorf("invalid program: %w", internal.ErrClosedFd)
}
switch pe.typ {
case kprobeEvent, kretprobeEvent, uprobeEvent, uretprobeEvent:
if t := prog.Type(); t != ebpf.Kprobe {
return fmt.Errorf("invalid program type (expected %s): %s", ebpf.Kprobe, t)
}
case tracepointEvent:
if t := prog.Type(); t != ebpf.TracePoint {
return fmt.Errorf("invalid program type (expected %s): %s", ebpf.TracePoint, t)
}
default:
return fmt.Errorf("unknown perf event type: %d", pe.typ)
}
// The ioctl below will fail when the fd is invalid.
kfd, _ := pe.fd.Value()
// Assign the eBPF program to the perf event.
err := unix.IoctlSetInt(int(kfd), unix.PERF_EVENT_IOC_SET_BPF, prog.FD())
if err != nil {
return fmt.Errorf("setting perf event bpf program: %w", err)
}
// PERF_EVENT_IOC_ENABLE and _DISABLE ignore their given values.
if err := unix.IoctlSetInt(int(kfd), unix.PERF_EVENT_IOC_ENABLE, 0); err != nil {
return fmt.Errorf("enable perf event: %s", err)
}
// Close the perf event when its reference is lost to avoid leaking system resources.
runtime.SetFinalizer(pe, (*perfEvent).Close)
return nil
}
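// Illustrative sketch, not part of this vendored file: the ioctl sequence used
// by attach above, expressed directly against golang.org/x/sys/unix. The
// helper name is hypothetical; perfFd is an open perf event fd and progFd a
// loaded eBPF program fd.

import "golang.org/x/sys/unix"

func attachProgToPerfEvent(perfFd, progFd int) error {
	// Point the perf event at the eBPF program...
	if err := unix.IoctlSetInt(perfFd, unix.PERF_EVENT_IOC_SET_BPF, progFd); err != nil {
		return err
	}
	// ...then enable it; the value argument is ignored for this ioctl.
	return unix.IoctlSetInt(perfFd, unix.PERF_EVENT_IOC_ENABLE, 0)
}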
// unsafeStringPtr returns an unsafe.Pointer to a NUL-terminated copy of str.
func unsafeStringPtr(str string) (unsafe.Pointer, error) {
p, err := unix.BytePtrFromString(str)
if err != nil {
return nil, err
}
return unsafe.Pointer(p), nil
}
// getTraceEventID reads a trace event's ID from tracefs given its group and name.
// group and name must be alphanumeric or underscore, as required by the kernel.
func getTraceEventID(group, name string) (uint64, error) {
tid, err := uint64FromFile(tracefsPath, "events", group, name, "id")
if errors.Is(err, os.ErrNotExist) {
return 0, fmt.Errorf("trace event %s/%s: %w", group, name, os.ErrNotExist)
}
if err != nil {
return 0, fmt.Errorf("reading trace event ID of %s/%s: %w", group, name, err)
}
return tid, nil
}
// getPMUEventType reads a Performance Monitoring Unit's type (numeric identifier)
// from /sys/bus/event_source/devices/<pmu>/type.
//
// Returns ErrNotSupported if the pmu type is not supported.
func getPMUEventType(typ probeType) (uint64, error) {
et, err := uint64FromFile("/sys/bus/event_source/devices", typ.String(), "type")
if errors.Is(err, os.ErrNotExist) {
return 0, fmt.Errorf("pmu type %s: %w", typ, ErrNotSupported)
}
if err != nil {
return 0, fmt.Errorf("reading pmu type %s: %w", typ, err)
}
return et, nil
}
// openTracepointPerfEvent opens a tracepoint-type perf event. System-wide
// [k,u]probes created by writing to <tracefs>/[k,u]probe_events are tracepoints
// behind the scenes, and can be attached to using these perf events.
func openTracepointPerfEvent(tid uint64, pid int) (*internal.FD, error) {
attr := unix.PerfEventAttr{
Type: unix.PERF_TYPE_TRACEPOINT,
Config: tid,
Sample_type: unix.PERF_SAMPLE_RAW,
Sample: 1,
Wakeup: 1,
}
fd, err := unix.PerfEventOpen(&attr, pid, 0, -1, unix.PERF_FLAG_FD_CLOEXEC)
if err != nil {
return nil, fmt.Errorf("opening tracepoint perf event: %w", err)
}
return internal.NewFD(uint32(fd)), nil
}
// uint64FromFile reads a uint64 from a file. All elements of path are sanitized
// and joined onto base. Returns error if base no longer prefixes the path after
// joining all components.
func uint64FromFile(base string, path ...string) (uint64, error) {
l := filepath.Join(path...)
p := filepath.Join(base, l)
if !strings.HasPrefix(p, base) {
return 0, fmt.Errorf("path '%s' attempts to escape base path '%s': %w", l, base, errInvalidInput)
}
data, err := os.ReadFile(p)
if err != nil {
return 0, fmt.Errorf("reading file %s: %w", p, err)
}
et := bytes.TrimSpace(data)
return strconv.ParseUint(string(et), 10, 64)
}
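// Illustrative sketch, not part of this vendored file: opening a perf event on
// an existing tracefs trace event, using only the standard library and
// golang.org/x/sys/unix. The helper name is hypothetical; group and name would
// be something like "ebpf_cafe" and "sys_execve" created via kprobe_events.

import (
	"bytes"
	"os"
	"strconv"

	"golang.org/x/sys/unix"
)

func openTraceEvent(group, name string) (int, error) {
	// Every trace event exposes its numeric ID under <tracefs>/events/.
	raw, err := os.ReadFile("/sys/kernel/debug/tracing/events/" + group + "/" + name + "/id")
	if err != nil {
		return -1, err
	}
	tid, err := strconv.ParseUint(string(bytes.TrimSpace(raw)), 10, 64)
	if err != nil {
		return -1, err
	}
	attr := unix.PerfEventAttr{
		Type:        unix.PERF_TYPE_TRACEPOINT,
		Config:      tid, // trace event ID read from tracefs
		Sample_type: unix.PERF_SAMPLE_RAW,
		Sample:      1,
		Wakeup:      1,
	}
	return unix.PerfEventOpen(&attr, -1, 0, -1, unix.PERF_FLAG_FD_CLOEXEC)
}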

25
vendor/github.com/cilium/ebpf/link/platform.go generated vendored Normal file

@ -0,0 +1,25 @@
package link
import (
"fmt"
"runtime"
)
func platformPrefix(symbol string) string {
prefix := runtime.GOARCH
// per https://github.com/golang/go/blob/master/src/go/build/syslist.go
switch prefix {
case "386":
prefix = "ia32"
case "amd64", "amd64p32":
prefix = "x64"
case "arm64", "arm64be":
prefix = "arm64"
default:
return symbol
}
return fmt.Sprintf("__%s_%s", prefix, symbol)
}
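// Illustrative sketch, not part of this vendored file: what platformPrefix
// yields for syscall symbols, written as if inside package link.

func exampleSyscallSymbol() string {
	// On amd64 this returns "__x64_sys_execve"; on 386, "__ia32_sys_execve";
	// on architectures without a known prefix the symbol is returned unchanged.
	return platformPrefix("sys_execve")
}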

76
vendor/github.com/cilium/ebpf/link/program.go generated vendored Normal file

@ -0,0 +1,76 @@
package link
import (
"fmt"
"github.com/cilium/ebpf"
"github.com/cilium/ebpf/internal"
)
type RawAttachProgramOptions struct {
// File descriptor to attach to. This differs for each attach type.
Target int
// Program to attach.
Program *ebpf.Program
// Program to replace (cgroups).
Replace *ebpf.Program
// Attach must match the attach type of Program (and Replace).
Attach ebpf.AttachType
// Flags control the attach behaviour. This differs for each attach type.
Flags uint32
}
// RawAttachProgram is a low level wrapper around BPF_PROG_ATTACH.
//
// You should use one of the higher level abstractions available in this
// package if possible.
func RawAttachProgram(opts RawAttachProgramOptions) error {
if err := haveProgAttach(); err != nil {
return err
}
var replaceFd uint32
if opts.Replace != nil {
replaceFd = uint32(opts.Replace.FD())
}
attr := internal.BPFProgAttachAttr{
TargetFd: uint32(opts.Target),
AttachBpfFd: uint32(opts.Program.FD()),
ReplaceBpfFd: replaceFd,
AttachType: uint32(opts.Attach),
AttachFlags: uint32(opts.Flags),
}
if err := internal.BPFProgAttach(&attr); err != nil {
return fmt.Errorf("can't attach program: %w", err)
}
return nil
}
type RawDetachProgramOptions struct {
Target int
Program *ebpf.Program
Attach ebpf.AttachType
}
// RawDetachProgram is a low level wrapper around BPF_PROG_DETACH.
//
// You should use one of the higher level abstractions available in this
// package if possible.
func RawDetachProgram(opts RawDetachProgramOptions) error {
if err := haveProgAttach(); err != nil {
return err
}
attr := internal.BPFProgDetachAttr{
TargetFd: uint32(opts.Target),
AttachBpfFd: uint32(opts.Program.FD()),
AttachType: uint32(opts.Attach),
}
if err := internal.BPFProgDetach(&attr); err != nil {
return fmt.Errorf("can't detach program: %w", err)
}
return nil
}
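// Illustrative sketch, not part of this vendored file: attaching a cgroup
// socket-buffer program with the low-level BPF_PROG_ATTACH wrapper. It assumes
// a cgroup2 hierarchy mounted at /sys/fs/cgroup and that prog is a loaded
// program of type ebpf.CGroupSKB; the higher-level helpers in this package are
// usually preferable.

import (
	"os"

	"github.com/cilium/ebpf"
	"github.com/cilium/ebpf/link"
)

func attachCgroupIngress(prog *ebpf.Program) error {
	cg, err := os.Open("/sys/fs/cgroup") // hypothetical cgroup path
	if err != nil {
		return err
	}
	defer cg.Close()
	return link.RawAttachProgram(link.RawAttachProgramOptions{
		Target:  int(cg.Fd()),
		Program: prog,
		Attach:  ebpf.AttachCGroupInetIngress,
	})
}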

61
vendor/github.com/cilium/ebpf/link/raw_tracepoint.go generated vendored Normal file

@ -0,0 +1,61 @@
package link
import (
"fmt"
"github.com/cilium/ebpf"
"github.com/cilium/ebpf/internal"
)
type RawTracepointOptions struct {
// Tracepoint name.
Name string
// Program must be of type RawTracepoint*
Program *ebpf.Program
}
// AttachRawTracepoint links a BPF program to a raw_tracepoint.
//
// Requires at least Linux 4.17.
func AttachRawTracepoint(opts RawTracepointOptions) (Link, error) {
if t := opts.Program.Type(); t != ebpf.RawTracepoint && t != ebpf.RawTracepointWritable {
return nil, fmt.Errorf("invalid program type %s, expected RawTracepoint(Writable)", t)
}
if opts.Program.FD() < 0 {
return nil, fmt.Errorf("invalid program: %w", internal.ErrClosedFd)
}
fd, err := bpfRawTracepointOpen(&bpfRawTracepointOpenAttr{
name: internal.NewStringPointer(opts.Name),
fd: uint32(opts.Program.FD()),
})
if err != nil {
return nil, err
}
return &progAttachRawTracepoint{fd: fd}, nil
}
type progAttachRawTracepoint struct {
fd *internal.FD
}
var _ Link = (*progAttachRawTracepoint)(nil)
func (rt *progAttachRawTracepoint) isLink() {}
func (rt *progAttachRawTracepoint) Close() error {
return rt.fd.Close()
}
func (rt *progAttachRawTracepoint) Update(_ *ebpf.Program) error {
return fmt.Errorf("can't update raw_tracepoint: %w", ErrNotSupported)
}
func (rt *progAttachRawTracepoint) Pin(_ string) error {
return fmt.Errorf("can't pin raw_tracepoint: %w", ErrNotSupported)
}
func (rt *progAttachRawTracepoint) Unpin() error {
return fmt.Errorf("unpin raw_tracepoint: %w", ErrNotSupported)
}
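// Illustrative sketch, not part of this vendored file: attaching a program to
// the sys_enter raw tracepoint. attachSysEnter is hypothetical and assumes
// prog is a loaded *ebpf.Program of type ebpf.RawTracepoint on a 4.17+ kernel.

import (
	"github.com/cilium/ebpf"
	"github.com/cilium/ebpf/link"
)

func attachSysEnter(prog *ebpf.Program) (link.Link, error) {
	// Closing the returned Link detaches the program again.
	return link.AttachRawTracepoint(link.RawTracepointOptions{
		Name:    "sys_enter",
		Program: prog,
	})
}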

Some files were not shown because too many files have changed in this diff.