/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BPF_CGROUP_H
#define _BPF_CGROUP_H

#include <linux/errno.h>
#include <linux/jump_label.h>
#include <linux/percpu.h>
#include <linux/rbtree.h>
#include <uapi/linux/bpf.h>

struct sock;
struct sockaddr;
struct cgroup;
struct sk_buff;
struct bpf_map;
struct bpf_prog;
struct bpf_sock_ops_kern;
struct bpf_cgroup_storage;

#ifdef CONFIG_CGROUP_BPF

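/*
 * cgroup_bpf_enabled is a static key: the branch is patched out at run
 * time while no cgroup-bpf programs are attached, so the hooks in this
 * header cost almost nothing on systems that never use them.
 */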
extern struct static_key_false cgroup_bpf_enabled_key;
#define cgroup_bpf_enabled static_branch_unlikely(&cgroup_bpf_enabled_key)

DECLARE_PER_CPU(void*, bpf_cgroup_storage);

struct bpf_cgroup_storage_map;

struct bpf_storage_buffer {
	struct rcu_head rcu;
	char data[];
};

struct bpf_cgroup_storage {
	struct bpf_storage_buffer *buf;
	struct bpf_cgroup_storage_map *map;
	struct bpf_cgroup_storage_key key;
	struct list_head list;
	struct rb_node node;
	struct rcu_head rcu;
};

struct bpf_prog_list {
	struct list_head node;
	struct bpf_prog *prog;
	struct bpf_cgroup_storage *storage;
};

struct bpf_prog_array;

struct cgroup_bpf {
	/* array of effective progs in this cgroup */
	struct bpf_prog_array __rcu *effective[MAX_BPF_ATTACH_TYPE];

	/* programs attached to this cgroup, together with their attach
	 * flags: when flags is 0 or BPF_F_ALLOW_OVERRIDE the progs list
	 * has zero or one element; with BPF_F_ALLOW_MULTI it can hold up
	 * to BPF_CGROUP_MAX_PROGS entries (see the example sketch below
	 * this struct)
	 */
	struct list_head progs[MAX_BPF_ATTACH_TYPE];
	u32 flags[MAX_BPF_ATTACH_TYPE];

	/* temp storage for effective prog array used by prog_attach/detach */
	struct bpf_prog_array __rcu *inactive;
};
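
/*
 * Example (an illustrative sketch, not part of this header): from user
 * space the flags above are supplied through the BPF_PROG_ATTACH
 * command of the bpf(2) syscall. cgroup_fd (an fd for the cgroup
 * directory) and prog_fd (an fd for a loaded program) are hypothetical
 * names:
 *
 *	union bpf_attr attr = {};
 *
 *	attr.target_fd	   = cgroup_fd;
 *	attr.attach_bpf_fd = prog_fd;
 *	attr.attach_type   = BPF_CGROUP_INET_EGRESS;
 *	attr.attach_flags  = BPF_F_ALLOW_MULTI;
 *
 *	syscall(__NR_bpf, BPF_PROG_ATTACH, &attr, sizeof(attr));
 *
 * With BPF_F_ALLOW_MULTI the new program is appended to the progs list
 * above instead of replacing the currently attached one.
 */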

void cgroup_bpf_put(struct cgroup *cgrp);
int cgroup_bpf_inherit(struct cgroup *cgrp);

int __cgroup_bpf_attach(struct cgroup *cgrp, struct bpf_prog *prog,
			enum bpf_attach_type type, u32 flags);
int __cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog,
			enum bpf_attach_type type, u32 flags);
int __cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr,
		       union bpf_attr __user *uattr);

/* Wrappers for __cgroup_bpf_*() protected by cgroup_mutex */
int cgroup_bpf_attach(struct cgroup *cgrp, struct bpf_prog *prog,
		      enum bpf_attach_type type, u32 flags);
int cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog,
		      enum bpf_attach_type type, u32 flags);
int cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr,
		     union bpf_attr __user *uattr);

int __cgroup_bpf_run_filter_skb(struct sock *sk,
				struct sk_buff *skb,
				enum bpf_attach_type type);

int __cgroup_bpf_run_filter_sk(struct sock *sk,
			       enum bpf_attach_type type);

int __cgroup_bpf_run_filter_sock_addr(struct sock *sk,
				      struct sockaddr *uaddr,
				      enum bpf_attach_type type,
				      void *t_ctx);

int __cgroup_bpf_run_filter_sock_ops(struct sock *sk,
				     struct bpf_sock_ops_kern *sock_ops,
				     enum bpf_attach_type type);

int __cgroup_bpf_check_dev_permission(short dev_type, u32 major, u32 minor,
				      short access, enum bpf_attach_type type);

static inline void bpf_cgroup_storage_set(struct bpf_cgroup_storage *storage)
{
	struct bpf_storage_buffer *buf;

	if (!storage)
		return;

	buf = READ_ONCE(storage->buf);
	this_cpu_write(bpf_cgroup_storage, &buf->data[0]);
}
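
/*
 * For context, a minimal sketch (not part of this header) of how the
 * per-cpu pointer set above is consumed: the bpf_get_local_storage()
 * helper returns it to programs that use a BPF_MAP_TYPE_CGROUP_STORAGE
 * map. The map and function names below are made up:
 *
 *	struct bpf_map_def SEC("maps") cg_storage = {
 *		.type	    = BPF_MAP_TYPE_CGROUP_STORAGE,
 *		.key_size   = sizeof(struct bpf_cgroup_storage_key),
 *		.value_size = sizeof(__u64),
 *	};
 *
 *	SEC("cgroup_skb/egress")
 *	int count_bytes(struct __sk_buff *skb)
 *	{
 *		__u64 *bytes = bpf_get_local_storage(&cg_storage, 0);
 *
 *		__sync_fetch_and_add(bytes, skb->len);
 *		return 1;
 *	}
 */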

struct bpf_cgroup_storage *bpf_cgroup_storage_alloc(struct bpf_prog *prog);
void bpf_cgroup_storage_free(struct bpf_cgroup_storage *storage);
void bpf_cgroup_storage_link(struct bpf_cgroup_storage *storage,
			     struct cgroup *cgroup,
			     enum bpf_attach_type type);
void bpf_cgroup_storage_unlink(struct bpf_cgroup_storage *storage);
int bpf_cgroup_storage_assign(struct bpf_prog *prog, struct bpf_map *map);
void bpf_cgroup_storage_release(struct bpf_prog *prog, struct bpf_map *map);
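
/*
 * Rough intended ordering of the storage calls above (a sketch; the
 * real call sites live in the syscall attach/detach paths):
 *
 *	storage = bpf_cgroup_storage_alloc(prog);
 *	bpf_cgroup_storage_link(storage, cgrp, type);
 *	...
 *	bpf_cgroup_storage_unlink(storage);
 *	bpf_cgroup_storage_free(storage);
 *
 * bpf_cgroup_storage_assign()/release() tie a program to its storage
 * map when the program is loaded and drop that binding when the program
 * goes away.
 */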

/* Wrappers for __cgroup_bpf_run_filter_skb() guarded by cgroup_bpf_enabled. */
#define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb)			      \
({									      \
	int __ret = 0;							      \
	if (cgroup_bpf_enabled)						      \
		__ret = __cgroup_bpf_run_filter_skb(sk, skb,		      \
						    BPF_CGROUP_INET_INGRESS); \
									      \
	__ret;								      \
})

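/*
 * On egress the skb may carry a request or timewait socket, which has
 * no cgroup data; sk_to_full_sk() walks back to the corresponding full
 * socket and sk_fullsock() re-checks before any program runs. The
 * sk == skb->sk test skips packets merely forwarded through this host.
 */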
#define BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb)			       \
({									       \
	int __ret = 0;							       \
	if (cgroup_bpf_enabled && sk && sk == skb->sk) {		       \
		typeof(sk) __sk = sk_to_full_sk(sk);			       \
		if (sk_fullsock(__sk))					       \
			__ret = __cgroup_bpf_run_filter_skb(__sk, skb,	       \
						      BPF_CGROUP_INET_EGRESS); \
	}								       \
	__ret;								       \
})

#define BPF_CGROUP_RUN_SK_PROG(sk, type)				       \
({									       \
	int __ret = 0;							       \
	if (cgroup_bpf_enabled) {					       \
		__ret = __cgroup_bpf_run_filter_sk(sk, type);		       \
	}								       \
	__ret;								       \
})

#define BPF_CGROUP_RUN_PROG_INET_SOCK(sk)				       \
	BPF_CGROUP_RUN_SK_PROG(sk, BPF_CGROUP_INET_SOCK_CREATE)

#define BPF_CGROUP_RUN_PROG_INET4_POST_BIND(sk)				       \
	BPF_CGROUP_RUN_SK_PROG(sk, BPF_CGROUP_INET4_POST_BIND)

#define BPF_CGROUP_RUN_PROG_INET6_POST_BIND(sk)				       \
	BPF_CGROUP_RUN_SK_PROG(sk, BPF_CGROUP_INET6_POST_BIND)

#define BPF_CGROUP_RUN_SA_PROG(sk, uaddr, type)				       \
({									       \
	int __ret = 0;							       \
	if (cgroup_bpf_enabled)						       \
		__ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, type,     \
							  NULL);	       \
	__ret;								       \
})

#define BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, type, t_ctx)		       \
({									       \
	int __ret = 0;							       \
	if (cgroup_bpf_enabled) {					       \
		lock_sock(sk);						       \
		__ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, type,     \
							  t_ctx);	       \
		release_sock(sk);					       \
	}								       \
	__ret;								       \
})

#define BPF_CGROUP_RUN_PROG_INET4_BIND(sk, uaddr)			       \
	BPF_CGROUP_RUN_SA_PROG(sk, uaddr, BPF_CGROUP_INET4_BIND)

#define BPF_CGROUP_RUN_PROG_INET6_BIND(sk, uaddr)			       \
	BPF_CGROUP_RUN_SA_PROG(sk, uaddr, BPF_CGROUP_INET6_BIND)

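/*
 * True when at least one cgroup-bpf program may run before connect()
 * and the protocol supplies a pre_connect hook; the connect paths
 * (e.g. __inet_stream_connect(), cited from memory rather than from
 * this header) use it to skip the hook entirely in the common case.
 */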
#define BPF_CGROUP_PRE_CONNECT_ENABLED(sk) (cgroup_bpf_enabled && \
					    sk->sk_prot->pre_connect)

#define BPF_CGROUP_RUN_PROG_INET4_CONNECT(sk, uaddr)			       \
	BPF_CGROUP_RUN_SA_PROG(sk, uaddr, BPF_CGROUP_INET4_CONNECT)

#define BPF_CGROUP_RUN_PROG_INET6_CONNECT(sk, uaddr)			       \
	BPF_CGROUP_RUN_SA_PROG(sk, uaddr, BPF_CGROUP_INET6_CONNECT)

#define BPF_CGROUP_RUN_PROG_INET4_CONNECT_LOCK(sk, uaddr)		       \
	BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_INET4_CONNECT, NULL)

#define BPF_CGROUP_RUN_PROG_INET6_CONNECT_LOCK(sk, uaddr)		       \
	BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_INET6_CONNECT, NULL)

#define BPF_CGROUP_RUN_PROG_UDP4_SENDMSG_LOCK(sk, uaddr, t_ctx)		       \
	BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_UDP4_SENDMSG, t_ctx)

#define BPF_CGROUP_RUN_PROG_UDP6_SENDMSG_LOCK(sk, uaddr, t_ctx)		       \
	BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_UDP6_SENDMSG, t_ctx)

#define BPF_CGROUP_RUN_PROG_UDP4_RECVMSG_LOCK(sk, uaddr)		       \
	BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_UDP4_RECVMSG, NULL)

#define BPF_CGROUP_RUN_PROG_UDP6_RECVMSG_LOCK(sk, uaddr)		       \
	BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_UDP6_RECVMSG, NULL)

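/*
 * Typically reached via tcp_call_bpf() from the TCP stack (named from
 * memory, not from this header). sock_ops->sk may be a request socket,
 * so the same sk_to_full_sk()/sk_fullsock() dance as on egress is
 * needed before the programs run.
 */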
#define BPF_CGROUP_RUN_PROG_SOCK_OPS(sock_ops)				       \
({									       \
	int __ret = 0;							       \
	if (cgroup_bpf_enabled && (sock_ops)->sk) {			       \
		typeof((sock_ops)->sk) __sk = sk_to_full_sk((sock_ops)->sk);  \
		if (__sk && sk_fullsock(__sk))				       \
			__ret = __cgroup_bpf_run_filter_sock_ops(__sk,	       \
								 sock_ops,     \
							 BPF_CGROUP_SOCK_OPS); \
	}								       \
	__ret;								       \
})

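/*
 * Consumed by the device cgroup code (from memory, via
 * devcgroup_check_permission()); a nonzero result is treated as a
 * denial of the requested device access.
 */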
#define BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(type, major, minor, access)	      \
({									      \
	int __ret = 0;							      \
	if (cgroup_bpf_enabled)						      \
		__ret = __cgroup_bpf_check_dev_permission(type, major, minor, \
							  access,	      \
							  BPF_CGROUP_DEVICE); \
									      \
	__ret;								      \
})

int cgroup_bpf_prog_attach(const union bpf_attr *attr,
			   enum bpf_prog_type ptype, struct bpf_prog *prog);
int cgroup_bpf_prog_detach(const union bpf_attr *attr,
			   enum bpf_prog_type ptype);
int cgroup_bpf_prog_query(const union bpf_attr *attr,
			  union bpf_attr __user *uattr);
#else

struct bpf_prog;
struct cgroup_bpf {};
static inline void cgroup_bpf_put(struct cgroup *cgrp) {}
static inline int cgroup_bpf_inherit(struct cgroup *cgrp) { return 0; }

static inline int cgroup_bpf_prog_attach(const union bpf_attr *attr,
					 enum bpf_prog_type ptype,
					 struct bpf_prog *prog)
{
	return -EINVAL;
}

static inline int cgroup_bpf_prog_detach(const union bpf_attr *attr,
					 enum bpf_prog_type ptype)
{
	return -EINVAL;
}

static inline int cgroup_bpf_prog_query(const union bpf_attr *attr,
					union bpf_attr __user *uattr)
{
	return -EINVAL;
}

static inline void bpf_cgroup_storage_set(struct bpf_cgroup_storage *storage) {}
static inline int bpf_cgroup_storage_assign(struct bpf_prog *prog,
					    struct bpf_map *map) { return 0; }
static inline void bpf_cgroup_storage_release(struct bpf_prog *prog,
					      struct bpf_map *map) {}
static inline struct bpf_cgroup_storage *bpf_cgroup_storage_alloc(
	struct bpf_prog *prog) { return NULL; }
static inline void bpf_cgroup_storage_free(
	struct bpf_cgroup_storage *storage) {}

#define cgroup_bpf_enabled (0)
#define BPF_CGROUP_PRE_CONNECT_ENABLED(sk) (0)
#define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET_SOCK(sk) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET4_BIND(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET6_BIND(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET4_POST_BIND(sk) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET6_POST_BIND(sk) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET4_CONNECT(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET4_CONNECT_LOCK(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET6_CONNECT(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET6_CONNECT_LOCK(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_UDP4_SENDMSG_LOCK(sk, uaddr, t_ctx) ({ 0; })
#define BPF_CGROUP_RUN_PROG_UDP6_SENDMSG_LOCK(sk, uaddr, t_ctx) ({ 0; })
#define BPF_CGROUP_RUN_PROG_UDP4_RECVMSG_LOCK(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_UDP6_RECVMSG_LOCK(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_SOCK_OPS(sock_ops) ({ 0; })
#define BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(type, major, minor, access) ({ 0; })

#endif /* CONFIG_CGROUP_BPF */

#endif /* _BPF_CGROUP_H */