/*
 * livepatch.h - Kernel Live Patching Core
 *
 * Copyright (C) 2014 Seth Jennings <sjenning@redhat.com>
 * Copyright (C) 2014 SUSE
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#ifndef _LINUX_LIVEPATCH_H_
#define _LINUX_LIVEPATCH_H_

#include <linux/module.h>
#include <linux/ftrace.h>
#include <linux/completion.h>

#if IS_ENABLED(CONFIG_LIVEPATCH)

#include <asm/livepatch.h>

/* task patch states */
#define KLP_UNDEFINED	-1
#define KLP_UNPATCHED	 0
#define KLP_PATCHED	 1

/**
 * struct klp_func - function structure for live patching
 * @old_name:	name of the function to be patched
 * @new_func:	pointer to the patched function code
 * @old_sympos: a hint indicating at which symbol position the old function
 *		can be found (optional)
 * @old_addr:	the address of the function being patched
 * @kobj:	kobject for sysfs resources
 * @stack_node:	list node for klp_ops func_stack list
 * @old_size:	size of the old function
 * @new_size:	size of the new function
 * @patched:	the func has been added to the klp_ops list
 * @transition:	the func is currently being applied or reverted
 *
 * The patched and transition variables define the func's patching state.  When
 * patching, a func is always in one of the following states:
 *
 *   patched=0 transition=0: unpatched
 *   patched=0 transition=1: unpatched, temporary starting state
 *   patched=1 transition=1: patched, may be visible to some tasks
 *   patched=1 transition=0: patched, visible to all tasks
 *
 * And when unpatching, it goes in the reverse order:
 *
 *   patched=1 transition=0: patched, visible to all tasks
 *   patched=1 transition=1: patched, may be visible to some tasks
 *   patched=0 transition=1: unpatched, temporary ending state
 *   patched=0 transition=0: unpatched
 */
struct klp_func {
	/* external */
	const char *old_name;
	void *new_func;
	/*
	 * The old_sympos field is optional and can be used to resolve
	 * duplicate symbol names in livepatch objects. If this field is zero,
	 * it is expected the symbol is unique, otherwise patching fails. If
	 * this value is greater than zero then that occurrence of the symbol
	 * in kallsyms for the given object is used.
	 */
	unsigned long old_sympos;

	/* internal */
	unsigned long old_addr;
	struct kobject kobj;
	struct list_head stack_node;
	unsigned long old_size, new_size;
	bool patched;
	bool transition;
};
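
/*
 * Example (illustrative sketch, not part of this header): a livepatch module
 * describes each function it replaces with a klp_func entry, filling in only
 * the "external" members, and terminates the array with an empty element so
 * the klp_for_each_func() iterator defined below knows where to stop.  The
 * names "do_something" and "livepatch_do_something" are hypothetical
 * placeholders.
 *
 *	static int livepatch_do_something(int arg);
 *
 *	static struct klp_func funcs[] = {
 *		{
 *			.old_name = "do_something",
 *			.new_func = livepatch_do_something,
 *		}, { }
 *	};
 */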

struct klp_object;

/**
 * struct klp_callbacks - pre/post live-(un)patch callback structure
 * @pre_patch:		executed before code patching
 * @post_patch:		executed after code patching
 * @pre_unpatch:	executed before code unpatching
 * @post_unpatch:	executed after code unpatching
 * @post_unpatch_enabled:	flag indicating if post-unpatch callback
 *				should run
 *
 * All callbacks are optional.  Only the pre-patch callback, if provided,
 * will be unconditionally executed.  If the parent klp_object fails to
 * patch for any reason, including a non-zero error status returned from
 * the pre-patch callback, no further callbacks will be executed.
 */
struct klp_callbacks {
	int (*pre_patch)(struct klp_object *obj);
	void (*post_patch)(struct klp_object *obj);
	void (*pre_unpatch)(struct klp_object *obj);
	void (*post_unpatch)(struct klp_object *obj);
	bool post_unpatch_enabled;
};
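
/*
 * Example (illustrative sketch, hypothetical handler names): per the rules
 * above, returning non-zero from the pre-patch callback fails the parent
 * object and suppresses the remaining callbacks; post_unpatch_enabled is
 * bookkeeping for the livepatch core rather than something the example needs
 * to set.  The handlers are wired up through the klp_object callbacks
 * member, as sketched after struct klp_patch below.
 *
 *	static int my_pre_patch(struct klp_object *obj)
 *	{
 *		pr_info("preparing %s\n", obj->name ? obj->name : "vmlinux");
 *		return 0;
 *	}
 *
 *	static void my_post_unpatch(struct klp_object *obj)
 *	{
 *		pr_info("cleaned up %s\n", obj->name ? obj->name : "vmlinux");
 *	}
 */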

/**
 * struct klp_object - kernel object structure for live patching
 * @name:	module name (or NULL for vmlinux)
 * @funcs:	function entries for functions to be patched in the object
 * @callbacks:	functions to be executed pre/post (un)patching
 * @kobj:	kobject for sysfs resources
 * @mod:	kernel module associated with the patched object
 *		(NULL for vmlinux)
 * @patched:	the object's funcs have been added to the klp_ops list
 */
struct klp_object {
	/* external */
	const char *name;
	struct klp_func *funcs;
	struct klp_callbacks callbacks;

	/* internal */
	struct kobject kobj;
	struct module *mod;
	bool patched;
};

/**
 * struct klp_patch - patch structure for live patching
 * @mod:	reference to the live patch module
 * @objs:	object entries for kernel objects to be patched
 * @list:	list node for global list of registered patches
 * @kobj:	kobject for sysfs resources
 * @enabled:	the patch is enabled (but operation may be incomplete)
 * @finish:	for waiting till it is safe to remove the patch module
 */
struct klp_patch {
	/* external */
	struct module *mod;
	struct klp_object *objs;

	/* internal */
	struct list_head list;
	struct kobject kobj;
	bool enabled;
	struct completion finish;
};
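
/*
 * Example (illustrative sketch, continuing the hypothetical funcs[] and
 * callback handlers above): the function entries are grouped into klp_object
 * entries (a NULL name selects vmlinux), and the objects into a klp_patch.
 * Only the "external" members are filled in, and both arrays end with an
 * empty terminator element for the iterators below.
 *
 *	static struct klp_object objs[] = {
 *		{
 *			.funcs = funcs,
 *			.callbacks = {
 *				.pre_patch	= my_pre_patch,
 *				.post_unpatch	= my_post_unpatch,
 *			},
 *		}, { }
 *	};
 *
 *	static struct klp_patch patch = {
 *		.mod = THIS_MODULE,
 *		.objs = objs,
 *	};
 */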

#define klp_for_each_object(patch, obj) \
	for (obj = patch->objs; obj->funcs || obj->name; obj++)

#define klp_for_each_func(obj, func) \
	for (func = obj->funcs; \
	     func->old_name || func->new_func || func->old_sympos; \
	     func++)
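
/*
 * Illustrative use of the iterators (hypothetical helper, assuming the
 * terminated arrays sketched above):
 *
 *	static void report_patch(struct klp_patch *p)
 *	{
 *		struct klp_object *obj;
 *		struct klp_func *func;
 *
 *		klp_for_each_object(p, obj)
 *			klp_for_each_func(obj, func)
 *				pr_debug("%s: %s\n",
 *					 obj->name ? obj->name : "vmlinux",
 *					 func->old_name);
 *	}
 */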

int klp_register_patch(struct klp_patch *);
int klp_unregister_patch(struct klp_patch *);
int klp_enable_patch(struct klp_patch *);
int klp_disable_patch(struct klp_patch *);
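
/*
 * Example (illustrative sketch, not a statement of the full API contract):
 * with this version of the interface a livepatch module typically registers
 * the patch and then enables it from its init function, and unregisters it
 * on exit; such modules are also tagged with MODULE_INFO(livepatch, "Y").
 * The "patch" variable is the hypothetical klp_patch sketched above.
 *
 *	static int livepatch_init(void)
 *	{
 *		int ret;
 *
 *		ret = klp_register_patch(&patch);
 *		if (ret)
 *			return ret;
 *
 *		ret = klp_enable_patch(&patch);
 *		if (ret) {
 *			WARN_ON(klp_unregister_patch(&patch));
 *			return ret;
 *		}
 *
 *		return 0;
 *	}
 *
 *	static void livepatch_exit(void)
 *	{
 *		WARN_ON(klp_unregister_patch(&patch));
 *	}
 *
 *	module_init(livepatch_init);
 *	module_exit(livepatch_exit);
 */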

void arch_klp_init_object_loaded(struct klp_patch *patch,
				 struct klp_object *obj);

/* Called from the module loader during module coming/going states */
int klp_module_coming(struct module *mod);
void klp_module_going(struct module *mod);

void klp_copy_process(struct task_struct *child);
void klp_update_patch_state(struct task_struct *task);

static inline bool klp_patch_pending(struct task_struct *task)
{
	return test_tsk_thread_flag(task, TIF_PATCH_PENDING);
}

static inline bool klp_have_reliable_stack(void)
{
	return IS_ENABLED(CONFIG_STACKTRACE) &&
	       IS_ENABLED(CONFIG_HAVE_RELIABLE_STACKTRACE);
}

typedef int (*klp_shadow_ctor_t)(void *obj,
				 void *shadow_data,
				 void *ctor_data);
typedef void (*klp_shadow_dtor_t)(void *obj, void *shadow_data);

void *klp_shadow_get(void *obj, unsigned long id);
void *klp_shadow_alloc(void *obj, unsigned long id,
		       size_t size, gfp_t gfp_flags,
		       klp_shadow_ctor_t ctor, void *ctor_data);
void *klp_shadow_get_or_alloc(void *obj, unsigned long id,
			      size_t size, gfp_t gfp_flags,
			      klp_shadow_ctor_t ctor, void *ctor_data);
void klp_shadow_free(void *obj, unsigned long id, klp_shadow_dtor_t dtor);
void klp_shadow_free_all(unsigned long id, klp_shadow_dtor_t dtor);
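
/*
 * Example (illustrative sketch, hypothetical id and helpers): shadow
 * variables attach extra state to an existing object without changing its
 * layout.  Here a lock is bolted onto a "dev" pointer under the made-up id
 * SV_LOCK; the constructor matches klp_shadow_ctor_t and initializes the
 * freshly allocated shadow data.
 *
 *	#define SV_LOCK	1
 *
 *	static int shadow_lock_ctor(void *obj, void *shadow_data,
 *				    void *ctor_data)
 *	{
 *		spin_lock_init((spinlock_t *)shadow_data);
 *		return 0;
 *	}
 *
 *	static spinlock_t *get_shadow_lock(void *dev)
 *	{
 *		return klp_shadow_get_or_alloc(dev, SV_LOCK,
 *					       sizeof(spinlock_t), GFP_KERNEL,
 *					       shadow_lock_ctor, NULL);
 *	}
 *
 *	static void put_shadow_lock(void *dev)
 *	{
 *		klp_shadow_free(dev, SV_LOCK, NULL);
 *	}
 */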

#else /* !CONFIG_LIVEPATCH */

static inline int klp_module_coming(struct module *mod) { return 0; }
static inline void klp_module_going(struct module *mod) {}
static inline bool klp_patch_pending(struct task_struct *task) { return false; }
static inline void klp_update_patch_state(struct task_struct *task) {}
static inline void klp_copy_process(struct task_struct *child) {}

#endif /* CONFIG_LIVEPATCH */

#endif /* _LINUX_LIVEPATCH_H_ */