// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
/* Copyright (c) 2019 Netronome Systems, Inc. */
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <net/if.h>
#include <sys/utsname.h>

#include <linux/filter.h>
#include <linux/kernel.h>

#include "bpf.h"
#include "libbpf.h"
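
/* Check whether @pattern occurs in the NUL-terminated log in @buffer. */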
static bool grep(const char *buffer, const char *pattern)
{
	return !!strstr(buffer, pattern);
}
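
/* Read the PCI vendor ID of the device backing the netdev with @ifindex
 * from sysfs; returns -1 on failure.
 */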
static int get_vendor_id(int ifindex)
{
	char ifname[IF_NAMESIZE], path[64], buf[8];
	ssize_t len;
	int fd;

	if (!if_indextoname(ifindex, ifname))
		return -1;

	snprintf(path, sizeof(path), "/sys/class/net/%s/device/vendor", ifname);

	fd = open(path, O_RDONLY);
	if (fd < 0)
		return -1;

	len = read(fd, buf, sizeof(buf));
	close(fd);
	if (len < 0)
		return -1;
	if (len >= (ssize_t)sizeof(buf))
		return -1;
	buf[len] = '\0';

	return strtol(buf, NULL, 0);
}
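
/* Encode the running kernel version as (major << 16) + (minor << 8) + patch,
 * the layout the kernel checks in kern_version for kprobe program loads.
 */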
static int get_kernel_version(void)
{
	int version, subversion, patchlevel;
	struct utsname utsn;

	/* Return 0 on failure, and attempt to probe with empty kversion */
	if (uname(&utsn))
		return 0;

	if (sscanf(utsn.release, "%d.%d.%d",
		   &version, &subversion, &patchlevel) != 3)
		return 0;

	return (version << 16) + (subversion << 8) + patchlevel;
}
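
/* Try to load a minimal program of @prog_type; if @buf is non-NULL the
 * verifier log is captured there. Some program types only load with extra
 * attributes set, which the switch below provides.
 */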
static void
probe_load(enum bpf_prog_type prog_type, const struct bpf_insn *insns,
	   size_t insns_cnt, char *buf, size_t buf_len, __u32 ifindex)
{
	struct bpf_load_program_attr xattr = {};
	int fd;

	switch (prog_type) {
	case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
		xattr.expected_attach_type = BPF_CGROUP_INET4_CONNECT;
		break;
	case BPF_PROG_TYPE_KPROBE:
		xattr.kern_version = get_kernel_version();
		break;
	case BPF_PROG_TYPE_UNSPEC:
	case BPF_PROG_TYPE_SOCKET_FILTER:
	case BPF_PROG_TYPE_SCHED_CLS:
	case BPF_PROG_TYPE_SCHED_ACT:
	case BPF_PROG_TYPE_TRACEPOINT:
	case BPF_PROG_TYPE_XDP:
	case BPF_PROG_TYPE_PERF_EVENT:
	case BPF_PROG_TYPE_CGROUP_SKB:
	case BPF_PROG_TYPE_CGROUP_SOCK:
	case BPF_PROG_TYPE_LWT_IN:
	case BPF_PROG_TYPE_LWT_OUT:
	case BPF_PROG_TYPE_LWT_XMIT:
	case BPF_PROG_TYPE_SOCK_OPS:
	case BPF_PROG_TYPE_SK_SKB:
	case BPF_PROG_TYPE_CGROUP_DEVICE:
	case BPF_PROG_TYPE_SK_MSG:
	case BPF_PROG_TYPE_RAW_TRACEPOINT:
	case BPF_PROG_TYPE_LWT_SEG6LOCAL:
	case BPF_PROG_TYPE_LIRC_MODE2:
	case BPF_PROG_TYPE_SK_REUSEPORT:
	case BPF_PROG_TYPE_FLOW_DISSECTOR:
	default:
		break;
	}

	xattr.prog_type = prog_type;
	xattr.insns = insns;
	xattr.insns_cnt = insns_cnt;
	xattr.license = "GPL";
	xattr.prog_ifindex = ifindex;

	fd = bpf_load_program_xattr(&xattr, buf, buf_len);
	if (fd >= 0)
		close(fd);
}
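
/* Probe availability of @prog_type, optionally offloaded to the device with
 * @ifindex. Unsupported program types make the load fail with EINVAL or
 * EOPNOTSUPP before the test program is ever verified.
 */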
bool bpf_probe_prog_type(enum bpf_prog_type prog_type, __u32 ifindex)
{
	struct bpf_insn insns[2] = {
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN()
	};

	if (ifindex && prog_type == BPF_PROG_TYPE_SCHED_CLS)
		/* nfp returns -EINVAL on exit(0) with TC offload */
		insns[0].imm = 2;

	errno = 0;
	probe_load(prog_type, insns, ARRAY_SIZE(insns), NULL, 0, ifindex);

	return errno != EINVAL && errno != EOPNOTSUPP;
}
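
/* Probe availability of @map_type by attempting to create a small map of
 * that type; a few types need adjusted key/value sizes, flags, or entry
 * counts to pass the kernel's parameter checks.
 */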
bool bpf_probe_map_type(enum bpf_map_type map_type, __u32 ifindex)
{
	int key_size, value_size, max_entries, map_flags;
	struct bpf_create_map_attr attr = {};
	int fd = -1, fd_inner;

	key_size	= sizeof(__u32);
	value_size	= sizeof(__u32);
	max_entries	= 1;
	map_flags	= 0;

	switch (map_type) {
	case BPF_MAP_TYPE_STACK_TRACE:
		value_size	= sizeof(__u64);
		break;
	case BPF_MAP_TYPE_LPM_TRIE:
		key_size	= sizeof(__u64);
		value_size	= sizeof(__u64);
		map_flags	= BPF_F_NO_PREALLOC;
		break;
	case BPF_MAP_TYPE_CGROUP_STORAGE:
	case BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE:
		key_size	= sizeof(struct bpf_cgroup_storage_key);
		value_size	= sizeof(__u64);
		max_entries	= 0;
		break;
	case BPF_MAP_TYPE_QUEUE:
	case BPF_MAP_TYPE_STACK:
		key_size	= 0;
		break;
	case BPF_MAP_TYPE_UNSPEC:
	case BPF_MAP_TYPE_HASH:
	case BPF_MAP_TYPE_ARRAY:
	case BPF_MAP_TYPE_PROG_ARRAY:
	case BPF_MAP_TYPE_PERF_EVENT_ARRAY:
	case BPF_MAP_TYPE_PERCPU_HASH:
	case BPF_MAP_TYPE_PERCPU_ARRAY:
	case BPF_MAP_TYPE_CGROUP_ARRAY:
	case BPF_MAP_TYPE_LRU_HASH:
	case BPF_MAP_TYPE_LRU_PERCPU_HASH:
	case BPF_MAP_TYPE_ARRAY_OF_MAPS:
	case BPF_MAP_TYPE_HASH_OF_MAPS:
	case BPF_MAP_TYPE_DEVMAP:
	case BPF_MAP_TYPE_SOCKMAP:
	case BPF_MAP_TYPE_CPUMAP:
	case BPF_MAP_TYPE_XSKMAP:
	case BPF_MAP_TYPE_SOCKHASH:
	case BPF_MAP_TYPE_REUSEPORT_SOCKARRAY:
	default:
		break;
	}

	if (map_type == BPF_MAP_TYPE_ARRAY_OF_MAPS ||
	    map_type == BPF_MAP_TYPE_HASH_OF_MAPS) {
		/* TODO: probe for device, once libbpf has a function to create
		 * map-in-map for offload
		 */
		if (ifindex)
			return false;

		fd_inner = bpf_create_map(BPF_MAP_TYPE_HASH,
					  sizeof(__u32), sizeof(__u32), 1, 0);
		if (fd_inner < 0)
			return false;
		fd = bpf_create_map_in_map(map_type, NULL, sizeof(__u32),
					   fd_inner, 1, 0);
		close(fd_inner);
	} else {
		/* Note: No other restriction on map type probes for offload */
		attr.map_type = map_type;
		attr.key_size = key_size;
		attr.value_size = value_size;
		attr.max_entries = max_entries;
		attr.map_flags = map_flags;
		attr.map_ifindex = ifindex;

		fd = bpf_create_map_xattr(&attr);
	}
	if (fd >= 0)
		close(fd);

	return fd >= 0;
}
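
/* Probe availability of helper @id for programs of @prog_type by loading a
 * one-call test program and scanning the verifier log for "invalid func" or
 * "unknown func"; for offload, also check driver-specific log messages.
 */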
bool bpf_probe_helper(enum bpf_func_id id, enum bpf_prog_type prog_type,
		      __u32 ifindex)
{
	struct bpf_insn insns[2] = {
		BPF_EMIT_CALL(id),
		BPF_EXIT_INSN()
	};
	char buf[4096] = {};
	bool res;

	probe_load(prog_type, insns, ARRAY_SIZE(insns), buf, sizeof(buf),
		   ifindex);
	res = !grep(buf, "invalid func ") && !grep(buf, "unknown func ");

	if (ifindex) {
		switch (get_vendor_id(ifindex)) {
		case 0x19ee: /* Netronome specific */
			res = res && !grep(buf, "not supported by FW") &&
			      !grep(buf, "unsupported function id");
			break;
		default:
			break;
		}
	}

	return res;
}
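
/*
 * Example usage (a hypothetical caller, not part of this file):
 *
 *	bool xdp_ok  = bpf_probe_prog_type(BPF_PROG_TYPE_XDP, 0);
 *	bool hash_ok = bpf_probe_map_type(BPF_MAP_TYPE_HASH, 0);
 *	bool time_ok = bpf_probe_helper(BPF_FUNC_ktime_get_ns,
 *					BPF_PROG_TYPE_XDP, 0);
 *
 * A non-zero ifindex probes hardware-offload support on that device instead
 * of the host kernel.
 */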