/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_COMPILER_H
#define __LINUX_COMPILER_H

#include <linux/compiler_types.h>

#ifndef __ASSEMBLY__

#ifdef __KERNEL__

/*
 * Note: DISABLE_BRANCH_PROFILING can be used by special low-level code
 * to disable branch tracing on a per-file basis.
 */
#if defined(CONFIG_TRACE_BRANCH_PROFILING) \
    && !defined(DISABLE_BRANCH_PROFILING) && !defined(__CHECKER__)
void ftrace_likely_update(struct ftrace_likely_data *f, int val,
			  int expect, int is_constant);

#define likely_notrace(x)	__builtin_expect(!!(x), 1)
#define unlikely_notrace(x)	__builtin_expect(!!(x), 0)

#define __branch_check__(x, expect, is_constant) ({			\
			long ______r;					\
			static struct ftrace_likely_data		\
				__attribute__((__aligned__(4)))		\
				__attribute__((section("_ftrace_annotated_branch"))) \
				______f = {				\
				.data.func = __func__,			\
				.data.file = __FILE__,			\
				.data.line = __LINE__,			\
			};						\
			______r = __builtin_expect(!!(x), expect);	\
			ftrace_likely_update(&______f, ______r,		\
					     expect, is_constant);	\
			______r;					\
		})

/*
 * We use __builtin_constant_p(x) to ignore cases where the return
 * value is always the same. This idea is taken from a similar patch
 * written by Daniel Walker.
 */
# ifndef likely
#  define likely(x)	(__branch_check__(x, 1, __builtin_constant_p(x)))
# endif
# ifndef unlikely
#  define unlikely(x)	(__branch_check__(x, 0, __builtin_constant_p(x)))
# endif

#ifdef CONFIG_PROFILE_ALL_BRANCHES
/*
 * "Define 'is'", Bill Clinton
 * "Define 'if'", Steven Rostedt
 */
#define if(cond, ...) __trace_if( (cond , ## __VA_ARGS__) )
#define __trace_if(cond) \
	if (__builtin_constant_p(!!(cond)) ? !!(cond) :			\
	({								\
		int ______r;						\
		static struct ftrace_branch_data			\
			__attribute__((__aligned__(4)))			\
			__attribute__((section("_ftrace_branch")))	\
			______f = {					\
				.func = __func__,			\
				.file = __FILE__,			\
				.line = __LINE__,			\
			};						\
		______r = !!(cond);					\
		______f.miss_hit[______r]++;				\
		______r;						\
	}))
#endif /* CONFIG_PROFILE_ALL_BRANCHES */

#else
# define likely(x)	__builtin_expect(!!(x), 1)
# define unlikely(x)	__builtin_expect(!!(x), 0)
#endif
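
/*
 * Usage sketch (hypothetical error path, not part of this header): mark the
 * failure branch of an allocation as unlikely() so the compiler keeps the
 * common case on the straight-line path:
 *
 *	void *buf = kmalloc(len, GFP_KERNEL);
 *
 *	if (unlikely(!buf))
 *		return -ENOMEM;
 *
 * With CONFIG_TRACE_BRANCH_PROFILING enabled, the same annotation also feeds
 * the branch profiler through ftrace_likely_update() above.
 */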

/* Optimization barrier */
#ifndef barrier
/* The "volatile" is due to gcc bugs */
# define barrier() __asm__ __volatile__("": : :"memory")
#endif

#ifndef barrier_data
/*
 * This version is used, for example, to prevent dead-store elimination
 * on @ptr, where gcc and llvm behave differently with a plain barrier():
 * gcc gets along with a normal barrier(), but llvm needs an explicit
 * input operand to consider the memory clobbered. The issue is as
 * follows: while the inline asm might access any memory it wants, the
 * compiler could have fit all of @ptr into registers instead, and since
 * @ptr never escaped from there, it proved that the inline asm wasn't
 * touching any of it. This version works well with both compilers, i.e.
 * we're telling the compiler that the inline asm absolutely may see the
 * contents of @ptr. See also: https://llvm.org/bugs/show_bug.cgi?id=15495
 */
# define barrier_data(ptr) __asm__ __volatile__("": :"r"(ptr) :"memory")
#endif
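
/*
 * Usage sketch (hypothetical helper, not part of this header): wipe a
 * sensitive buffer and use barrier_data() so the compiler cannot treat the
 * memset() as a dead store and elide it:
 *
 *	static inline void example_wipe_key(u8 *key, size_t len)
 *	{
 *		memset(key, 0, len);
 *		barrier_data(key);
 *	}
 *
 * This mirrors what memzero_explicit() in lib/string.c does.
 */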

/* workaround for GCC PR82365 if needed */
#ifndef barrier_before_unreachable
# define barrier_before_unreachable() do { } while (0)
#endif

/* Unreachable code */
#ifdef CONFIG_STACK_VALIDATION
/*
 * These macros help objtool understand GCC code flow for unreachable code.
 * The __COUNTER__ based labels are a hack to make each instance of the macros
 * unique, to convince GCC not to merge duplicate inline asm statements.
 */
#define annotate_reachable() ({						\
	asm volatile("%c0:\n\t"						\
		     ".pushsection .discard.reachable\n\t"		\
		     ".long %c0b - .\n\t"				\
		     ".popsection\n\t" : : "i" (__COUNTER__));		\
})
#define annotate_unreachable() ({					\
	asm volatile("%c0:\n\t"						\
		     ".pushsection .discard.unreachable\n\t"		\
		     ".long %c0b - .\n\t"				\
		     ".popsection\n\t" : : "i" (__COUNTER__));		\
})
#define ASM_UNREACHABLE							\
	"999:\n\t"							\
	".pushsection .discard.unreachable\n\t"				\
	".long 999b - .\n\t"						\
	".popsection\n\t"

#ifdef CONFIG_DEBUG_ENTRY
/* Begin/end of an instrumentation safe region */
#define instrumentation_begin() ({					\
	asm volatile("%c0:\n\t"						\
		     ".pushsection .discard.instr_begin\n\t"		\
		     ".long %c0b - .\n\t"				\
		     ".popsection\n\t" : : "i" (__COUNTER__));		\
})

/*
 * Because instrumentation_{begin,end}() can nest, objtool validation considers
 * _begin() a +1 and _end() a -1 and computes a sum over the instructions.
 * When the value is greater than 0, we consider instrumentation allowed.
 *
 * There is a problem with code like:
 *
 * noinstr void foo()
 * {
 *	instrumentation_begin();
 *	...
 *	if (cond) {
 *		instrumentation_begin();
 *		...
 *		instrumentation_end();
 *	}
 *	bar();
 *	instrumentation_end();
 * }
 *
 * If instrumentation_end() were an empty label, like all the other
 * annotations, the inner _end(), which is at the end of a conditional block,
 * would land on the instruction after the block.
 *
 * If we then consider the sum on the !cond path, we see that the call to
 * bar() happens with a value of 0, even though we meant it to happen with a
 * positive value.
 *
 * To avoid this, have _end() be a NOP instruction; this ensures it will be
 * part of the conditional block and does not escape it.
 */
#define instrumentation_end() ({					\
	asm volatile("%c0: nop\n\t"					\
		     ".pushsection .discard.instr_end\n\t"		\
		     ".long %c0b - .\n\t"				\
		     ".popsection\n\t" : : "i" (__COUNTER__));		\
})
#endif /* CONFIG_DEBUG_ENTRY */

#else
#define annotate_reachable()
#define annotate_unreachable()
#endif

#ifndef instrumentation_begin
#define instrumentation_begin()		do { } while (0)
#define instrumentation_end()		do { } while (0)
#endif

#ifndef ASM_UNREACHABLE
# define ASM_UNREACHABLE
#endif
#ifndef unreachable
# define unreachable() do {		\
	annotate_unreachable();		\
	__builtin_unreachable();	\
} while (0)
#endif
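
/*
 * Usage sketch (hypothetical state machine, not part of this header): tell
 * the compiler and objtool that control flow cannot fall out of an
 * exhaustive switch, instead of letting them assume a fall-through path:
 *
 *	switch (state) {
 *	case EXAMPLE_STATE_A:
 *		return handle_a();
 *	case EXAMPLE_STATE_B:
 *		return handle_b();
 *	}
 *	unreachable();
 *
 * unreachable() must only be placed where the situation truly cannot occur;
 * reaching it at run time is undefined behaviour.
 */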

/*
 * KENTRY - kernel entry point
 * This can be used to annotate symbols (functions or data) that are used
 * without their linker symbol being referenced explicitly. For example,
 * interrupt vector handlers, or functions in the kernel image that are found
 * programmatically.
 *
 * Not required for symbols exported with EXPORT_SYMBOL, or initcalls. Those
 * are handled in their own way (with KEEP() in linker scripts).
 *
 * KENTRY can be avoided if the symbols in question are marked as KEEP() in the
 * linker script. For example, an architecture could KEEP() its entire
 * boot/exception vector code rather than annotate each function and data.
 */
#ifndef KENTRY
# define KENTRY(sym)						\
	extern typeof(sym) sym;					\
	static const unsigned long __kentry_##sym		\
	__used							\
	__attribute__((section("___kentry" "+" #sym ), used))	\
	= (unsigned long)&sym;
#endif
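
/*
 * Usage sketch (hypothetical symbol, not part of this header): keep a
 * handler that is only ever located by scanning the image, never referenced
 * by name from C code, from being discarded by the linker:
 *
 *	void example_vector_entry(void);
 *	KENTRY(example_vector_entry);
 */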

#ifndef RELOC_HIDE
# define RELOC_HIDE(ptr, off)					\
  ({ unsigned long __ptr;					\
     __ptr = (unsigned long) (ptr);				\
    (typeof(ptr)) (__ptr + (off)); })
#endif

#define absolute_pointer(val)	RELOC_HIDE((void *)(val), 0)
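
/*
 * Usage sketch (made-up address and names, not part of this header): build a
 * pointer from a fixed integer address without the compiler treating it as
 * an offset from some known object and warning about out-of-bounds accesses:
 *
 *	if (memcmp(absolute_pointer(0x000f0000), example_sig, sizeof(example_sig)))
 *		return -ENODEV;
 *
 * RELOC_HIDE() launders the value, so the optimizer cannot associate the
 * resulting pointer with any particular C object.
 */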

#ifndef OPTIMIZER_HIDE_VAR
/* Make the optimizer believe the variable can be manipulated arbitrarily. */
#define OPTIMIZER_HIDE_VAR(var)						\
	__asm__ ("" : "=r" (var) : "0" (var))
#endif
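
/*
 * Usage sketch (hypothetical comparison loop, not part of this header):
 * keep a constant-time comparison constant-time by hiding the accumulated
 * difference from the optimizer on every iteration:
 *
 *	u8 diff = 0;
 *
 *	for (i = 0; i < len; i++) {
 *		diff |= a[i] ^ b[i];
 *		OPTIMIZER_HIDE_VAR(diff);
 *	}
 *	return diff;
 *
 * This is the pattern crypto_memneq() in crypto/memneq.c relies on.
 */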

/* Not-quite-unique ID. */
#ifndef __UNIQUE_ID
# define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __LINE__)
#endif

#include <uapi/linux/types.h>

#define __READ_ONCE_SIZE						\
({									\
	switch (size) {							\
	case 1: *(__u8 *)res = *(volatile __u8 *)p; break;		\
	case 2: *(__u16 *)res = *(volatile __u16 *)p; break;		\
	case 4: *(__u32 *)res = *(volatile __u32 *)p; break;		\
	case 8: *(__u64 *)res = *(volatile __u64 *)p; break;		\
	default:							\
		barrier();						\
		__builtin_memcpy((void *)res, (const void *)p, size);	\
		barrier();						\
	}								\
})

static __always_inline
void __read_once_size(const volatile void *p, void *res, int size)
{
	__READ_ONCE_SIZE;
}

#ifdef CONFIG_KASAN
/*
 * We can't declare the function 'inline' because __no_sanitize_address
 * conflicts with inlining; attempting to inline it may cause a build failure.
 * 	https://gcc.gnu.org/bugzilla/show_bug.cgi?id=67368
 * '__maybe_unused' allows us to avoid defined-but-not-used warnings.
 */
# define __no_kasan_or_inline __no_sanitize_address __maybe_unused
#else
# define __no_kasan_or_inline __always_inline
#endif

static __no_kasan_or_inline
void __read_once_size_nocheck(const volatile void *p, void *res, int size)
{
	__READ_ONCE_SIZE;
}

static __always_inline void __write_once_size(volatile void *p, void *res, int size)
{
	switch (size) {
	case 1: *(volatile __u8 *)p = *(__u8 *)res; break;
	case 2: *(volatile __u16 *)p = *(__u16 *)res; break;
	case 4: *(volatile __u32 *)p = *(__u32 *)res; break;
	case 8: *(volatile __u64 *)p = *(__u64 *)res; break;
	default:
		barrier();
		__builtin_memcpy((void *)p, (const void *)res, size);
		barrier();
	}
}

/*
 * Prevent the compiler from merging or refetching reads or writes. The
 * compiler is also forbidden from reordering successive instances of
 * READ_ONCE and WRITE_ONCE, but only when the compiler is aware of some
 * particular ordering. One way to make the compiler aware of ordering is to
 * put the two invocations of READ_ONCE or WRITE_ONCE in different C
 * statements.
 *
 * These two macros will also work on aggregate data types like structs or
 * unions. If the size of the accessed data type exceeds the word size of
 * the machine (e.g., 32 bits or 64 bits), READ_ONCE() and WRITE_ONCE() will
 * fall back to memcpy(). There are at least two memcpy()s: one for the
 * __builtin_memcpy() and then one for the macro's own copy into the
 * variable '__u' allocated on the stack.
 *
 * Their two major use cases are: (1) Mediating communication between
 * process-level code and irq/NMI handlers, all running on the same CPU,
 * and (2) Ensuring that the compiler does not fold, spindle, or otherwise
 * mutilate accesses that either do not require ordering or that interact
 * with an explicit memory barrier or atomic instruction that provides the
 * required ordering.
 */
#include <asm/barrier.h>
#include <linux/kasan-checks.h>

#define __READ_ONCE(x, check)						\
({									\
	union { typeof(x) __val; char __c[1]; } __u;			\
	if (check)							\
		__read_once_size(&(x), __u.__c, sizeof(x));		\
	else								\
		__read_once_size_nocheck(&(x), __u.__c, sizeof(x));	\
	smp_read_barrier_depends(); /* Enforce dependency ordering from x */ \
	__u.__val;							\
})
#define READ_ONCE(x) __READ_ONCE(x, 1)

/*
 * Use READ_ONCE_NOCHECK() instead of READ_ONCE() if you need
 * to hide memory access from KASAN.
 */
#define READ_ONCE_NOCHECK(x) __READ_ONCE(x, 0)

static __no_kasan_or_inline
unsigned long read_word_at_a_time(const void *addr)
{
	kasan_check_read(addr, 1);
	return *(unsigned long *)addr;
}

#define WRITE_ONCE(x, val) \
({							\
	union { typeof(x) __val; char __c[1]; } __u =	\
		{ .__val = (__force typeof(x)) (val) }; \
	__write_once_size(&(x), __u.__c, sizeof(x));	\
	__u.__val;					\
})
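
/*
 * Usage sketch (hypothetical flag, not part of this header): a lockless flag
 * shared between task context and an interrupt handler on the same CPU.
 * WRITE_ONCE() publishes each update as a single store and READ_ONCE()
 * forces a fresh, non-torn load on every poll:
 *
 *	// writer, e.g. in the irq handler
 *	WRITE_ONCE(example_done, 1);
 *
 *	// reader, e.g. in task context
 *	while (!READ_ONCE(example_done))
 *		cpu_relax();
 *
 * Ordering against other memory accesses still needs explicit barriers or
 * acquire/release primitives; these macros only constrain the compiler.
 */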

#endif /* __KERNEL__ */

/*
 * Force the compiler to emit 'sym' as a symbol, so that we can reference
 * it from inline assembler. Necessary in case 'sym' could be inlined
 * otherwise, or eliminated entirely due to lack of references that are
 * visible to the compiler.
 */
#define __ADDRESSABLE(sym) \
	static void * __attribute__((section(".discard.addressable"), used)) \
		__PASTE(__addressable_##sym, __LINE__) = (void *)&sym;
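
/*
 * Usage sketch (hypothetical function, not part of this header):
 *
 *	static void example_callback(void) { }
 *	__ADDRESSABLE(example_callback);
 *
 * guarantees a real symbol for example_callback exists, so inline assembly
 * or boot-time patching code can still find it even if every C caller was
 * inlined or optimized away.
 */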

/**
 * offset_to_ptr - convert a relative memory offset to an absolute pointer
 * @off:	the address of the 32-bit offset value
 */
static inline void *offset_to_ptr(const int *off)
{
	return (void *)((unsigned long)off + *off);
}
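
/*
 * Usage sketch (hypothetical table, not part of this header): tables that
 * store 32-bit offsets relative to each entry, rather than absolute
 * pointers, are converted back like this:
 *
 *	const int *entry = &example_offset_table[i];
 *	void *target = offset_to_ptr(entry);
 *
 * i.e. the target address is "address of the entry + value stored in it".
 */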

#endif /* __ASSEMBLY__ */

#ifndef __optimize
# define __optimize(level)
#endif

/* Compile time object size, -1 for unknown */
#ifndef __compiletime_object_size
# define __compiletime_object_size(obj) -1
#endif
#ifndef __compiletime_warning
# define __compiletime_warning(message)
#endif
#ifndef __compiletime_error
# define __compiletime_error(message)
#endif

#ifdef __OPTIMIZE__
# define __compiletime_assert(condition, msg, prefix, suffix)		\
	do {								\
		extern void prefix ## suffix(void) __compiletime_error(msg); \
		if (!(condition))					\
			prefix ## suffix();				\
	} while (0)
#else
# define __compiletime_assert(condition, msg, prefix, suffix) do { } while (0)
#endif

#define _compiletime_assert(condition, msg, prefix, suffix) \
	__compiletime_assert(condition, msg, prefix, suffix)

/**
 * compiletime_assert - break build and emit msg if condition is false
 * @condition: a compile-time constant condition to check
 * @msg:       a message to emit if condition is false
 *
 * In the tradition of POSIX assert, this macro will break the build if the
 * supplied condition is *false*, emitting the supplied error message if the
 * compiler has support to do so.
 */
#define compiletime_assert(condition, msg) \
	_compiletime_assert(condition, msg, __compiletime_assert_, __COUNTER__)

#define compiletime_assert_atomic_type(t)				\
	compiletime_assert(__native_word(t),				\
		"Need native word sized stores/loads for atomicity.")
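
/*
 * Usage sketch (hypothetical struct, not part of this header): turn a silent
 * layout assumption into a build failure:
 *
 *	compiletime_assert(sizeof(struct example_hdr) == 16,
 *			   "struct example_hdr must stay 16 bytes");
 *
 * The condition must fold to a compile-time constant; when it is false and
 * optimization is enabled, the call to the __compiletime_error()-annotated
 * function is not eliminated and the build breaks with @msg.
 */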

/*
 * This is needed in functions which generate the stack canary, see
 * arch/x86/kernel/smpboot.c::start_secondary() for an example.
 */
#define prevent_tail_call_optimization()	mb()

#endif /* __LINUX_COMPILER_H */