Commit db045733 authored by Linus Torvalds's avatar Linus Torvalds
Browse files

Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/livepatching/livepatching

Pull livepatching updates from Jiri Kosina:

 - stacktrace handling improvements from Miroslav Benes

 - debug output improvements from Petr Mladek

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/livepatching/livepatching:
  livepatch: Remove duplicate warning about missing reliable stacktrace support
  Revert "livepatch: Remove reliable stacktrace check in klp_try_switch_task()"
  stacktrace: Remove weak version of save_stack_trace_tsk_reliable()
  livepatch: Use static buffer for debugging messages under rq lock
  livepatch: Remove stale kobj_added entries from kernel-doc descriptions
parents 1f7563f7 38195dd5
...@@ -35,7 +35,6 @@ ...@@ -35,7 +35,6 @@
* @stack_node: list node for klp_ops func_stack list * @stack_node: list node for klp_ops func_stack list
* @old_size: size of the old function * @old_size: size of the old function
* @new_size: size of the new function * @new_size: size of the new function
* @kobj_added: @kobj has been added and needs freeing
* @nop: temporary patch to use the original code again; dyn. allocated * @nop: temporary patch to use the original code again; dyn. allocated
* @patched: the func has been added to the klp_ops list * @patched: the func has been added to the klp_ops list
* @transition: the func is currently being applied or reverted * @transition: the func is currently being applied or reverted
...@@ -113,7 +112,6 @@ struct klp_callbacks { ...@@ -113,7 +112,6 @@ struct klp_callbacks {
* @node: list node for klp_patch obj_list * @node: list node for klp_patch obj_list
* @mod: kernel module associated with the patched object * @mod: kernel module associated with the patched object
* (NULL for vmlinux) * (NULL for vmlinux)
* @kobj_added: @kobj has been added and needs freeing
* @dynamic: temporary object for nop functions; dynamically allocated * @dynamic: temporary object for nop functions; dynamically allocated
* @patched: the object's funcs have been added to the klp_ops list * @patched: the object's funcs have been added to the klp_ops list
*/ */
...@@ -140,7 +138,6 @@ struct klp_object { ...@@ -140,7 +138,6 @@ struct klp_object {
* @list: list node for global list of actively used patches * @list: list node for global list of actively used patches
* @kobj: kobject for sysfs resources * @kobj: kobject for sysfs resources
* @obj_list: dynamic list of the object entries * @obj_list: dynamic list of the object entries
* @kobj_added: @kobj has been added and needs freeing
* @enabled: the patch is enabled (but operation may be incomplete) * @enabled: the patch is enabled (but operation may be incomplete)
* @forced: was involved in a forced transition * @forced: was involved in a forced transition
* @free_work: patch cleanup from workqueue-context * @free_work: patch cleanup from workqueue-context
......
...@@ -247,7 +247,6 @@ static int klp_check_stack(struct task_struct *task, char *err_buf) ...@@ -247,7 +247,6 @@ static int klp_check_stack(struct task_struct *task, char *err_buf)
int ret, nr_entries; int ret, nr_entries;
ret = stack_trace_save_tsk_reliable(task, entries, ARRAY_SIZE(entries)); ret = stack_trace_save_tsk_reliable(task, entries, ARRAY_SIZE(entries));
WARN_ON_ONCE(ret == -ENOSYS);
if (ret < 0) { if (ret < 0) {
snprintf(err_buf, STACK_ERR_BUF_SIZE, snprintf(err_buf, STACK_ERR_BUF_SIZE,
"%s: %s:%d has an unreliable stack\n", "%s: %s:%d has an unreliable stack\n",
...@@ -281,11 +280,11 @@ static int klp_check_stack(struct task_struct *task, char *err_buf) ...@@ -281,11 +280,11 @@ static int klp_check_stack(struct task_struct *task, char *err_buf)
*/ */
static bool klp_try_switch_task(struct task_struct *task) static bool klp_try_switch_task(struct task_struct *task)
{ {
static char err_buf[STACK_ERR_BUF_SIZE];
struct rq *rq; struct rq *rq;
struct rq_flags flags; struct rq_flags flags;
int ret; int ret;
bool success = false; bool success = false;
char err_buf[STACK_ERR_BUF_SIZE];
err_buf[0] = '\0'; err_buf[0] = '\0';
...@@ -293,6 +292,13 @@ static bool klp_try_switch_task(struct task_struct *task) ...@@ -293,6 +292,13 @@ static bool klp_try_switch_task(struct task_struct *task)
if (task->patch_state == klp_target_state) if (task->patch_state == klp_target_state)
return true; return true;
/*
* For arches which don't have reliable stack traces, we have to rely
* on other methods (e.g., switching tasks at kernel exit).
*/
if (!klp_have_reliable_stack())
return false;
/* /*
* Now try to check the stack for any to-be-patched or to-be-unpatched * Now try to check the stack for any to-be-patched or to-be-unpatched
* functions. If all goes well, switch the task to the target patch * functions. If all goes well, switch the task to the target patch
...@@ -328,7 +334,6 @@ static bool klp_try_switch_task(struct task_struct *task) ...@@ -328,7 +334,6 @@ static bool klp_try_switch_task(struct task_struct *task)
pr_debug("%s", err_buf); pr_debug("%s", err_buf);
return success; return success;
} }
/* /*
......
...@@ -255,14 +255,6 @@ save_stack_trace_regs(struct pt_regs *regs, struct stack_trace *trace) ...@@ -255,14 +255,6 @@ save_stack_trace_regs(struct pt_regs *regs, struct stack_trace *trace)
WARN_ONCE(1, KERN_INFO "save_stack_trace_regs() not implemented yet.\n"); WARN_ONCE(1, KERN_INFO "save_stack_trace_regs() not implemented yet.\n");
} }
/*
 * Weak default for architectures that provide no reliable stack-trace
 * implementation: warn once and report -ENOSYS so callers (e.g. the
 * livepatch consistency model) can tell the feature is unavailable.
 * NOTE(review): the WARN_ONCE message says "save_stack_tsk_reliable"
 * rather than the actual function name — presumably a historical typo.
 */
__weak int
save_stack_trace_tsk_reliable(struct task_struct *tsk,
struct stack_trace *trace)
{
WARN_ONCE(1, KERN_INFO "save_stack_tsk_reliable() not implemented yet.\n");
return -ENOSYS;
}
/** /**
* stack_trace_save - Save a stack trace into a storage array * stack_trace_save - Save a stack trace into a storage array
* @store: Pointer to storage array * @store: Pointer to storage array
......
Supports Markdown
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment