workqueue fix init
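In substance, this commit appears to move work-item initialization out of monitor_init() and into start_watch_variable() (dropping static from sample_task_work() so the handler can be referenced there), and to cancel any pending work before a timer slot is recycled in del_all_kwarg_by_pid(). For orientation, here is a minimal sketch of the workqueue lifecycle the module relies on; all struct and function names below are illustrative stand-ins, not the module's real API:

/* Minimal sketch (hypothetical names): a work_struct embedded in a larger
 * struct, initialized with INIT_WORK() when the slot is armed, recovered in
 * the handler via container_of(), and cancelled before the slot is reused. */
#include <linux/kernel.h>
#include <linux/workqueue.h>

struct sample_timer {                  /* stands in for kernel_watch_timer */
  int threshold_num;
  struct work_struct wk;
};

static struct sample_timer timer_slot;

static void sample_handler(struct work_struct *work)
{
  /* recover the embedding struct from the work_struct pointer */
  struct sample_timer *t = container_of(work, struct sample_timer, wk);

  if (t->threshold_num <= 0)
    return;
  pr_info("sample_handler: threshold_num=%d\n", t->threshold_num);
}

static void arm_timer_slot(void)
{
  /* initialize the work when the slot is actually handed out ... */
  INIT_WORK(&timer_slot.wk, sample_handler);
  schedule_work(&timer_slot.wk);       /* ... then queue it */
}

static void release_timer_slot(void)
{
  /* wait for any queued or running instance before the slot is reused */
  cancel_work_sync(&timer_slot.wk);
}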
@@ -80,7 +80,7 @@ static long device_ioctl(struct file *file, unsigned int ioctl_num,
      ret = copy_to_user_variant_buffer(
          &load_monitor_variant_buffer, dump_param.user_ptr_len,
          dump_param.user_buf, dump_param.user_buf_len);
      // printk(KERN_INFO "ret %d, %lu\n", ret, dump_param.user_buf_len);
      printk(KERN_INFO "ret %d, %lu\n", ret, dump_param.user_buf_len);
    }
    printk(KERN_INFO "copy_to_user_variant_buffer \n");
    break;
@@ -57,7 +57,7 @@ static int init_buffer(unsigned int buf_size) {

static void diag_tsk(struct task_struct *p, variable_monitor_task *tsk_info) {
  unsigned int nr_bt;
  printk(KERN_INFO "diag_tsk\n");
  // printk(KERN_INFO "diag_tsk\n");
  diag_task_brief(p, &tsk_info->task); // task brief
  // printk("1\n");
  diag_task_user_stack(p, &tsk_info->user_stack); // user stack
@@ -78,7 +78,7 @@ static void diag_tsk(struct task_struct *p, variable_monitor_task *tsk_info) {
}

static void push_tsk_info(variable_monitor_task *tsk_info, unsigned long *flags) {
  printk(KERN_INFO "push_tsk_info\n");
  // printk(KERN_INFO "push_tsk_info\n");
  diag_variant_buffer_spin_lock(&load_monitor_variant_buffer, *flags);
  diag_variant_buffer_reserve(&load_monitor_variant_buffer,
                              sizeof(variable_monitor_task));
@@ -101,11 +101,14 @@ static void clear_all_watch(void) {
  memset(kernel_wtimer_list, 0, sizeof(kernel_wtimer_list));
}

static void sample_task_work(struct work_struct *work){
void sample_task_work(struct work_struct *work){

  kernel_watch_timer *k_watch_timer = container_of(work, kernel_watch_timer, wk);

  if (k_watch_timer->threshold_num <= 0) return;

  printk(KERN_INFO "sample_task_work\n");

  struct task_struct *g, *p; // g: task group; p: task
  unsigned long flags;
  unsigned long event_id = get_cycles();
@@ -124,6 +127,17 @@ static void sample_task_work(struct work_struct *work){
    kwarg = &k_watch_timer->k_watch_args[k_watch_timer->threshold_buffer[i]];
    k_w_arg2threshold(kwarg, &vm_record.threshold_record[i]);
  }
  // !todo adjust the output format
  printk(KERN_INFO "threshold exceeded: %lld\n", vm_record.tv);

  for (i = 0; i < vm_record.threshold_num; i++) {
    printk(KERN_INFO "\t: pid: %d, name: %s, ptr: %p, threshold:%lld\n",
           vm_record.threshold_record[i].task_id,
           vm_record.threshold_record[i]
               .name, // assuming name is a null-terminated string
           vm_record.threshold_record[i].ptr,
           vm_record.threshold_record[i].threshold);
  }

  rcu_read_lock();
@@ -154,6 +168,7 @@ static void sample_task_work(struct work_struct *work){
    }
  }
  while_each_thread(g, p);
  return ;
}
/**
 * @brief all module function init. orig_X | buffer | workqueue
@@ -170,11 +185,11 @@ int monitor_init(void) {
  if (ret)
    return -1;
  // init workqueue
  int i;
  for (i=0; i < MAX_TIMER_NUM; i++) {
    kernel_watch_timer *kw_timer = &kernel_wtimer_list[i];
    INIT_WORK(&kw_timer->wk, sample_task_work);
  }
  // int i;
  // for (i=0; i < MAX_TIMER_NUM; i++) {
  //   kernel_watch_timer *kw_timer = &kernel_wtimer_list[i];
  //   INIT_WORK(&kw_timer->wk, sample_task_work);
  // }
  return 0;
}
@@ -218,6 +233,8 @@ int start_watch_variable(watch_arg warg) {
  w_arg2k_w_arg(kptr, warg, &k_watch_arg);
  timer = get_timer(warg.time_ns); // get a valuable timer

  INIT_WORK(&timer->wk, sample_task_work);

  printk(KERN_INFO "ptr transform kptr: %p\n", kptr);
  printk(KERN_INFO "timer: %p\n", timer);
  printk(KERN_INFO "timer->sentinel: %d, timer->time_ns: %lld\n",
@@ -27,12 +27,18 @@ unsigned char del_all_kwarg_by_pid(pid_t pid) {
    timer = &(kernel_wtimer_list[i]);
    if (TIMER_NO_KWARG(timer)) // no available kwarg
    {
      // cancel and destroy timer.work
      // make sure empty timer has no work active
      cancel_work_sync(&timer->wk);
      destroy_work_on_stack(&timer->wk);

      if (i != kernel_wtimer_num - 1) {
        memcpy(timer, &kernel_wtimer_list[kernel_wtimer_num - 1],
               sizeof(kernel_watch_timer));
      }
      kernel_wtimer_num--;
      i--;

    }
  }
  return 0;
@@ -102,7 +102,7 @@ static int copy_stack_frame_remote(struct task_struct *tsk,
  }

  ret = orig_access_remote_vm(mm, (unsigned long)fp, frame, sizeof(*frame), 0);
  printk("copy_stack_frame_remote %d ret:%d\n", tsk->pid, ret);
  // printk("copy_stack_frame_remote %d ret:%d\n", tsk->pid, ret);
  mmput(mm);

  return ret;
@@ -134,30 +134,30 @@ static inline void save_stack_trace_user_remote(struct task_struct *tsk,
    frame.ret_addr = 0;

    if (!copy_stack_frame_remote(tsk, fp, &frame)) {
      printk("save_stack_trace_user_remote %d copy_stack_frame_remote fail\n",
             tsk->pid);
      // printk("save_stack_trace_user_remote %d copy_stack_frame_remote fail\n",
      //        tsk->pid);
      break;
    }

    if ((unsigned long)fp < regs->sp) {
      printk("save_stack_trace_user_remote %d fp < sp count:%d\n", tsk->pid,
             count);
      // printk("save_stack_trace_user_remote %d fp < sp count:%d\n", tsk->pid,
      //        count);
      break; // fp below sp means we have reached the bottom of the stack, stop
    }
    // a non-zero return address means this is a valid stack frame, so record it
    if (frame.ret_addr) {
      trace->entries[trace->nr_entries++] = frame.ret_addr;
      printk("save_stack_trace_user_remote %d ret_addr:%lx\n", tsk->pid,
             frame.ret_addr);
      // printk("save_stack_trace_user_remote %d ret_addr:%lx\n", tsk->pid,
      //        frame.ret_addr);
    } else {
      printk("save_stack_trace_user_remote %d no ret_addr", tsk->pid);
      // printk("save_stack_trace_user_remote %d no ret_addr", tsk->pid);
      break;
      // continue;
    }

    // if fp points to itself, we have reached the bottom of the stack, stop
    if (fp == frame.next_fp) {
      printk("save_stack_trace_user_remote %d fp == next_fp", tsk->pid);
      // printk("save_stack_trace_user_remote %d fp == next_fp", tsk->pid);
      break;
    }
    fp = frame.next_fp; // otherwise keep walking down the stack
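For context, the loop above is a conventional frame-pointer walk: read the frame that fp points at, record ret_addr, follow next_fp, and stop when the chain leaves the stack or stops advancing. Below is a condensed sketch of the same walk with the debug printks dropped; read_frame() is a hypothetical stand-in for copy_stack_frame_remote() (the module reads the target mm via orig_access_remote_vm(), while this sketch only reads the current task's user memory):

/* Condensed sketch of a frame-pointer user-stack walk (hypothetical names). */
#include <linux/kernel.h>
#include <linux/uaccess.h>

struct stack_frame_user {
  unsigned long next_fp;   /* caller's saved frame pointer */
  unsigned long ret_addr;  /* return address pushed by the call */
};

/* read one frame from user memory; returns 1 on success, 0 on failure
 * (copy_stack_frame_remote() plays this role in the module) */
static int read_frame(unsigned long fp, struct stack_frame_user *frame)
{
  return copy_from_user(frame, (const void __user *)fp, sizeof(*frame)) == 0;
}

static unsigned int walk_user_stack(unsigned long fp, unsigned long sp,
                                    unsigned long *entries, unsigned int max)
{
  struct stack_frame_user frame;
  unsigned int n = 0;

  while (n < max) {
    if (!read_frame(fp, &frame))
      break;                       /* unreadable frame */
    if (fp < sp)
      break;                       /* walked below the stack pointer */
    if (!frame.ret_addr)
      break;                       /* not a valid frame */
    entries[n++] = frame.ret_addr; /* record the return address */
    if (fp == frame.next_fp)
      break;                       /* chain stopped advancing */
    fp = frame.next_fp;            /* follow the chain downward */
  }
  return n;
}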
@@ -237,18 +237,18 @@ static void diagnose_save_stack_trace_user_remote(struct task_struct *tsk,
   * Trace user stack if we are not a kernel thread
   */
  if (tsk->mm) {
    printk("save_stack_trace_user_remote %d mm\n", tsk->pid);
    // printk("save_stack_trace_user_remote %d mm\n", tsk->pid);
    save_stack_trace_user_remote(tsk, &trace);
  }
  if (trace.nr_entries < trace.max_entries)
    trace.entries[trace.nr_entries++] = ULONG_MAX;

  printk("save_stack_trace_user_remote %d, stack: [", tsk->pid);
  int i = 0;
  for (i = 0; i < BACKTRACE_DEPTH; i++) {
    printk("%lx, ", backtrace[i]);
  }
  printk("]\n");
  // printk("save_stack_trace_user_remote %d, stack: [", tsk->pid);
  // int i = 0;
  // for (i = 0; i < BACKTRACE_DEPTH; i++) {
  //   printk("%lx, ", backtrace[i]);
  // }
  // printk("]\n");
}

static int diagnose_task_raw_stack_remote(struct task_struct *tsk, void *to,
@@ -384,10 +384,10 @@ void diag_task_user_stack(struct task_struct *tsk, user_stack_detail *detail) {
  detail->bp = bp;

  if (tsk == current) {
    printk("diag_task_user_stack %d current\n", tsk->pid);
    // printk("diag_task_user_stack %d current\n", tsk->pid);
    diagnose_save_stack_trace_user(detail->stack);
  } else {
    printk("diag_task_user_stack %d no current\n", tsk->pid);
    // printk("diag_task_user_stack %d no current\n", tsk->pid);
    diagnose_save_stack_trace_user_remote(tsk, detail->stack);
  }
}