/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BPF_CGROUP_H
#define _BPF_CGROUP_H

#include <linux/errno.h>
#include <linux/jump_label.h>
#include <linux/rbtree.h>
#include <uapi/linux/bpf.h>

struct sock;
struct sockaddr;
struct cgroup;
struct sk_buff;
struct bpf_map;
struct bpf_prog;
struct bpf_sock_ops_kern;
struct bpf_cgroup_storage;

#ifdef CONFIG_CGROUP_BPF

extern struct static_key_false cgroup_bpf_enabled_key;
#define cgroup_bpf_enabled static_branch_unlikely(&cgroup_bpf_enabled_key)

struct bpf_cgroup_storage_map;

struct bpf_storage_buffer {
	struct rcu_head rcu;
	char data[0];
};

struct bpf_cgroup_storage {
	struct bpf_storage_buffer *buf;
	struct bpf_cgroup_storage_map *map;
	struct bpf_cgroup_storage_key key;
	struct list_head list;
	struct rb_node node;
	struct rcu_head rcu;
};

struct bpf_prog_list {
	struct list_head node;
	struct bpf_prog *prog;
};

struct bpf_prog_array;

struct cgroup_bpf {
	/* array of effective progs in this cgroup */
	struct bpf_prog_array __rcu *effective[MAX_BPF_ATTACH_TYPE];

	/* progs attached to this cgroup, plus their attach flags.
	 * When flags is 0 or BPF_F_ALLOW_OVERRIDE, the progs list holds
	 * either zero or one element; with BPF_F_ALLOW_MULTI it can hold
	 * up to BPF_CGROUP_MAX_PROGS elements.
	 */
	struct list_head progs[MAX_BPF_ATTACH_TYPE];
	u32 flags[MAX_BPF_ATTACH_TYPE];

	/* temp storage for the effective prog array used by prog_attach/detach */
	struct bpf_prog_array __rcu *inactive;
};
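
/*
 * Illustrative example, not part of this header: from user space the
 * attach mode above is chosen by the flags passed to BPF_PROG_ATTACH.
 * A minimal sketch using libbpf's bpf_prog_attach(), assuming prog_fd
 * and cgroup_fd were obtained elsewhere:
 *
 *	// 0 or BPF_F_ALLOW_OVERRIDE: at most one program per attach type.
 *	// BPF_F_ALLOW_MULTI: up to BPF_CGROUP_MAX_PROGS programs per type.
 *	int err = bpf_prog_attach(prog_fd, cgroup_fd,
 *				  BPF_CGROUP_INET_EGRESS, BPF_F_ALLOW_MULTI);
 */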

void cgroup_bpf_put(struct cgroup *cgrp);
int cgroup_bpf_inherit(struct cgroup *cgrp);

int __cgroup_bpf_attach(struct cgroup *cgrp, struct bpf_prog *prog,
			enum bpf_attach_type type, u32 flags);
int __cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog,
			enum bpf_attach_type type, u32 flags);
int __cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr,
		       union bpf_attr __user *uattr);

/* Wrapper for __cgroup_bpf_*() protected by cgroup_mutex */
int cgroup_bpf_attach(struct cgroup *cgrp, struct bpf_prog *prog,
		      enum bpf_attach_type type, u32 flags);
int cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog,
		      enum bpf_attach_type type, u32 flags);
int cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr,
		     union bpf_attr __user *uattr);
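
/*
 * The locked wrappers above are expected to be thin shims that take
 * cgroup_mutex around the corresponding __cgroup_bpf_*() helper. A rough
 * sketch (the real definitions live in the cgroup core, not in this
 * header):
 *
 *	int cgroup_bpf_attach(struct cgroup *cgrp, struct bpf_prog *prog,
 *			      enum bpf_attach_type type, u32 flags)
 *	{
 *		int ret;
 *
 *		mutex_lock(&cgroup_mutex);
 *		ret = __cgroup_bpf_attach(cgrp, prog, type, flags);
 *		mutex_unlock(&cgroup_mutex);
 *		return ret;
 *	}
 */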

int __cgroup_bpf_run_filter_skb(struct sock *sk,
				struct sk_buff *skb,
				enum bpf_attach_type type);

int __cgroup_bpf_run_filter_sk(struct sock *sk,
			       enum bpf_attach_type type);

int __cgroup_bpf_run_filter_sock_addr(struct sock *sk,
				      struct sockaddr *uaddr,
				      enum bpf_attach_type type,
				      void *t_ctx);

int __cgroup_bpf_run_filter_sock_ops(struct sock *sk,
				     struct bpf_sock_ops_kern *sock_ops,
				     enum bpf_attach_type type);

int __cgroup_bpf_check_dev_permission(short dev_type, u32 major, u32 minor,
				      short access, enum bpf_attach_type type);

struct bpf_cgroup_storage *bpf_cgroup_storage_alloc(struct bpf_prog *prog);
void bpf_cgroup_storage_free(struct bpf_cgroup_storage *storage);
void bpf_cgroup_storage_link(struct bpf_cgroup_storage *storage,
			     struct cgroup *cgroup,
			     enum bpf_attach_type type);
void bpf_cgroup_storage_unlink(struct bpf_cgroup_storage *storage);
int bpf_cgroup_storage_assign(struct bpf_prog *prog, struct bpf_map *map);
void bpf_cgroup_storage_release(struct bpf_prog *prog, struct bpf_map *map);
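
/*
 * Rough lifecycle of the storage helpers above (illustrative sketch only;
 * the attach/detach paths in the kernel are the real reference): storage
 * is allocated for a program, linked to the cgroup it gets attached to,
 * and unlinked and freed again on detach or on a failed attach:
 *
 *	storage = bpf_cgroup_storage_alloc(prog);
 *	if (IS_ERR(storage))
 *		return -ENOMEM;
 *	...
 *	bpf_cgroup_storage_link(storage, cgrp, type);
 *	...
 *	bpf_cgroup_storage_unlink(storage);
 *	bpf_cgroup_storage_free(storage);
 */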

/* Wrappers for __cgroup_bpf_run_filter_skb() guarded by cgroup_bpf_enabled. */
#define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb)			      \
({									      \
	int __ret = 0;							      \
	if (cgroup_bpf_enabled)						      \
		__ret = __cgroup_bpf_run_filter_skb(sk, skb,		      \
						    BPF_CGROUP_INET_INGRESS); \
									      \
	__ret;								      \
})

#define BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb)			       \
({									       \
	int __ret = 0;							       \
	if (cgroup_bpf_enabled && sk && sk == skb->sk) {		       \
		typeof(sk) __sk = sk_to_full_sk(sk);			       \
		if (sk_fullsock(__sk))					       \
			__ret = __cgroup_bpf_run_filter_skb(__sk, skb,	       \
						      BPF_CGROUP_INET_EGRESS); \
	}								       \
	__ret;								       \
})
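
/*
 * Illustrative call site (sketch only, assuming a socket and skb are in
 * scope): the ingress/egress hooks are meant to be run from the network
 * stack, and a non-zero return value is treated as "drop the packet":
 *
 *	err = BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb);
 *	if (err)
 *		return err;
 */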

#define BPF_CGROUP_RUN_SK_PROG(sk, type)				       \
({									       \
	int __ret = 0;							       \
	if (cgroup_bpf_enabled) {					       \
		__ret = __cgroup_bpf_run_filter_sk(sk, type);		       \
	}								       \
	__ret;								       \
})

#define BPF_CGROUP_RUN_PROG_INET_SOCK(sk)				       \
	BPF_CGROUP_RUN_SK_PROG(sk, BPF_CGROUP_INET_SOCK_CREATE)

#define BPF_CGROUP_RUN_PROG_INET4_POST_BIND(sk)				       \
	BPF_CGROUP_RUN_SK_PROG(sk, BPF_CGROUP_INET4_POST_BIND)

#define BPF_CGROUP_RUN_PROG_INET6_POST_BIND(sk)				       \
	BPF_CGROUP_RUN_SK_PROG(sk, BPF_CGROUP_INET6_POST_BIND)

#define BPF_CGROUP_RUN_SA_PROG(sk, uaddr, type)				       \
({									       \
	int __ret = 0;							       \
	if (cgroup_bpf_enabled)						       \
		__ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, type,    \
							  NULL);	       \
	__ret;								       \
})

#define BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, type, t_ctx)		       \
({									       \
	int __ret = 0;							       \
	if (cgroup_bpf_enabled)	{					       \
		lock_sock(sk);						       \
		__ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, type,    \
							  t_ctx);	       \
		release_sock(sk);					       \
	}								       \
	__ret;								       \
})
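
/*
 * The _LOCK variant is intended for callers that do not already hold the
 * socket lock: it wraps the run in lock_sock()/release_sock() so socket
 * state stays stable while the sock_addr program runs. Hypothetical
 * unlocked call site (t_ctx carries hook-specific context, or NULL when
 * there is none):
 *
 *	err = BPF_CGROUP_RUN_SA_PROG_LOCK(sk, (struct sockaddr *)&addr,
 *					  BPF_CGROUP_INET4_CONNECT, NULL);
 */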

#define BPF_CGROUP_RUN_PROG_INET4_BIND(sk, uaddr)			       \
	BPF_CGROUP_RUN_SA_PROG(sk, uaddr, BPF_CGROUP_INET4_BIND)

#define BPF_CGROUP_RUN_PROG_INET6_BIND(sk, uaddr)			       \
	BPF_CGROUP_RUN_SA_PROG(sk, uaddr, BPF_CGROUP_INET6_BIND)

#define BPF_CGROUP_PRE_CONNECT_ENABLED(sk) (cgroup_bpf_enabled &&	       \
					    sk->sk_prot->pre_connect)

#define BPF_CGROUP_RUN_PROG_INET4_CONNECT(sk, uaddr)			       \
	BPF_CGROUP_RUN_SA_PROG(sk, uaddr, BPF_CGROUP_INET4_CONNECT)

#define BPF_CGROUP_RUN_PROG_INET6_CONNECT(sk, uaddr)			       \
	BPF_CGROUP_RUN_SA_PROG(sk, uaddr, BPF_CGROUP_INET6_CONNECT)

#define BPF_CGROUP_RUN_PROG_INET4_CONNECT_LOCK(sk, uaddr)		       \
	BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_INET4_CONNECT, NULL)

#define BPF_CGROUP_RUN_PROG_INET6_CONNECT_LOCK(sk, uaddr)		       \
	BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_INET6_CONNECT, NULL)

#define BPF_CGROUP_RUN_PROG_UDP4_SENDMSG_LOCK(sk, uaddr, t_ctx)		       \
	BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_UDP4_SENDMSG, t_ctx)

#define BPF_CGROUP_RUN_PROG_UDP6_SENDMSG_LOCK(sk, uaddr, t_ctx)		       \
	BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_UDP6_SENDMSG, t_ctx)

#define BPF_CGROUP_RUN_PROG_SOCK_OPS(sock_ops)				       \
({									       \
	int __ret = 0;							       \
	if (cgroup_bpf_enabled && (sock_ops)->sk) {			       \
		typeof((sock_ops)->sk) __sk = sk_to_full_sk((sock_ops)->sk);  \
		if (__sk && sk_fullsock(__sk))				       \
			__ret = __cgroup_bpf_run_filter_sock_ops(__sk,	       \
								 sock_ops,     \
							 BPF_CGROUP_SOCK_OPS); \
	}								       \
	__ret;								       \
})

#define BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(type, major, minor, access)	       \
({									       \
	int __ret = 0;							       \
	if (cgroup_bpf_enabled)						       \
		__ret = __cgroup_bpf_check_dev_permission(type, major, minor, \
							  access,	       \
							  BPF_CGROUP_DEVICE);  \
									       \
	__ret;								       \
})
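
/*
 * Illustrative use (sketch only, assuming a device-cgroup style caller):
 * the device hook returns 0 when access is allowed and a negative errno
 * when an attached program denies it:
 *
 *	if (BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(dev_type, major, minor, access))
 *		return -EPERM;
 */
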
int cgroup_bpf_prog_attach(const union bpf_attr *attr,
			   enum bpf_prog_type ptype, struct bpf_prog *prog);
int cgroup_bpf_prog_detach(const union bpf_attr *attr,
			   enum bpf_prog_type ptype);
int cgroup_bpf_prog_query(const union bpf_attr *attr,
			  union bpf_attr __user *uattr);
#else

struct bpf_prog;
struct cgroup_bpf {};
static inline void cgroup_bpf_put(struct cgroup *cgrp) {}
static inline int cgroup_bpf_inherit(struct cgroup *cgrp) { return 0; }

static inline int cgroup_bpf_prog_attach(const union bpf_attr *attr,
					 enum bpf_prog_type ptype,
					 struct bpf_prog *prog)
{
	return -EINVAL;
}

static inline int cgroup_bpf_prog_detach(const union bpf_attr *attr,
					 enum bpf_prog_type ptype)
{
	return -EINVAL;
}

static inline int cgroup_bpf_prog_query(const union bpf_attr *attr,
					union bpf_attr __user *uattr)
{
	return -EINVAL;
}

static inline int bpf_cgroup_storage_assign(struct bpf_prog *prog,
					    struct bpf_map *map) { return 0; }
static inline void bpf_cgroup_storage_release(struct bpf_prog *prog,
					      struct bpf_map *map) {}
static inline struct bpf_cgroup_storage *bpf_cgroup_storage_alloc(
	struct bpf_prog *prog) { return NULL; }
static inline void bpf_cgroup_storage_free(
	struct bpf_cgroup_storage *storage) {}

#define cgroup_bpf_enabled (0)
#define BPF_CGROUP_PRE_CONNECT_ENABLED(sk) (0)
#define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET_SOCK(sk) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET4_BIND(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET6_BIND(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET4_POST_BIND(sk) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET6_POST_BIND(sk) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET4_CONNECT(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET4_CONNECT_LOCK(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET6_CONNECT(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET6_CONNECT_LOCK(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_UDP4_SENDMSG_LOCK(sk, uaddr, t_ctx) ({ 0; })
#define BPF_CGROUP_RUN_PROG_UDP6_SENDMSG_LOCK(sk, uaddr, t_ctx) ({ 0; })
#define BPF_CGROUP_RUN_PROG_SOCK_OPS(sock_ops) ({ 0; })
#define BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(type, major, minor, access) ({ 0; })

#endif /* CONFIG_CGROUP_BPF */

#endif /* _BPF_CGROUP_H */