From 49fb0b5f2ec2d897072768a4634bc5beb0818143 Mon Sep 17 00:00:00 2001 From: zy Date: Mon, 27 Nov 2023 15:16:35 +0800 Subject: [PATCH] module: code clean --- source/module/monitor_kallsyms.c | 7 +- source/module/monitor_kernel.c | 85 ++++---- source/module/monitor_kernel.h | 14 +- source/module/monitor_kernel_lib.c | 325 ++++++++++++++--------------- source/module/monitor_mem.c | 6 +- source/module/monitor_mem.h | 8 +- source/module/monitor_trace.c | 178 +++++++--------- source/module/monitor_trace.h | 25 +-- 8 files changed, 310 insertions(+), 338 deletions(-) diff --git a/source/module/monitor_kallsyms.c b/source/module/monitor_kallsyms.c index 9517b9b..182412e 100644 --- a/source/module/monitor_kallsyms.c +++ b/source/module/monitor_kallsyms.c @@ -4,6 +4,7 @@ unsigned long (*diag_kallsyms_lookup_name)(const char *name); struct kprobe kprobe_kallsyms_lookup_name = {.symbol_name = "kallsyms_lookup_name"}; +// orig_fun unsigned int (*orig_stack_trace_save_tsk)(struct task_struct *task, unsigned long *store, unsigned int size, @@ -18,7 +19,11 @@ int (*orig_access_remote_vm)(struct mm_struct *mm, unsigned long addr, void *buf, int len, unsigned int gup_flags); struct task_struct *(*orig_find_task_by_vpid)(pid_t nr); - +/** + * @brief diag_kallsyms_lookup_name init + * + * @return int + */ static int fn_kallsyms_lookup_name_init(void) { register_kprobe(&kprobe_kallsyms_lookup_name); diag_kallsyms_lookup_name = (void *)kprobe_kallsyms_lookup_name.addr; diff --git a/source/module/monitor_kernel.c b/source/module/monitor_kernel.c index 82092c0..fd26828 100644 --- a/source/module/monitor_kernel.c +++ b/source/module/monitor_kernel.c @@ -1,12 +1,12 @@ #include "monitor_kernel.h" -#include // for cdev +#include // for cdev #include #include #include #include #include -#include // for kmalloc +#include // for kmalloc #define DEVICE_NAME "variable_monitor" @@ -25,7 +25,8 @@ static int device_open(struct inode *inode, struct file *file) { current->pid); // save pid data = kmalloc(sizeof(*data), GFP_KERNEL); - if (!data) return -ENOMEM; + if (!data) + return -ENOMEM; data->pid = current->pid; file->private_data = data; return 0; @@ -38,12 +39,12 @@ static int device_release(struct inode *inode, struct file *file) { data->pid); // clear watch with pid clear_watch(data->pid); - kfree(data); // free data memory + kfree(data); // free data memory return 0; } typedef struct { - int pid; + int pid; } ioctl_pid; static long device_ioctl(struct file *file, unsigned int ioctl_num, @@ -51,50 +52,46 @@ static long device_ioctl(struct file *file, unsigned int ioctl_num, int ret = 0; watch_arg warg; ioctl_dump_param dump_param; - // ioctl_pid wpid; + ioctl_pid wpid; printk(KERN_INFO "variable_monitor fun: %s with ioctl_num %d\n", __FUNCTION__, ioctl_num); switch (ioctl_num) { - case 0: - // copy watch_arg - if (copy_from_user(&warg, (watch_arg *)ioctl_param, sizeof(warg))) { - return -EACCES; - } - printk(KERN_INFO - "Watch_arg: task_id=%d, name=%s, ptr=%p, length_byte=%d, " - "time_ns=%ld, threshold=%lld\n", - warg.task_id, warg.name, warg.ptr, warg.length_byte, warg.time_ns, - warg.threshold); - // start watch variable - start_watch_variable(warg); - break; - case 1: - printk(KERN_INFO "variable_monitor ioctl_num 1\n"); - ret = copy_from_user(&dump_param, (ioctl_dump_param *)ioctl_param, - sizeof(ioctl_dump_param)); - printk(KERN_INFO "dump_param: %p %lu %p\n", dump_param.user_ptr_len, dump_param.user_buf_len, dump_param.user_buf); - if (!ret) { - // printk(KERN_INFO "ret\n"); - ret = copy_to_user_variant_buffer( 
-          &load_monitor_variant_buffer, dump_param.user_ptr_len,
-          dump_param.user_buf, dump_param.user_buf_len);
-      printk(KERN_INFO "ret %d, %lu\n", ret, dump_param.user_buf_len);
-      }
-      printk(KERN_INFO "copy_to_user_variant_buffer \n");
-      break;
-    // case 0:
-    //   printk(KERN_INFO "variable_monitor test 2\n");
-    //   ret = copy_from_user(&wpid, (ioctl_pid *)ioctl_param, sizeof(ioctl_pid));
-    //   diag_test(wpid.pid);
-    //   /* code */
-    //   break;
-    default:
-      // printk(KERN_INFO "variable_monitor test default\n");
-      // ret = copy_from_user(&wpid, (ioctl_pid *)ioctl_param, sizeof(ioctl_pid));
-      // diag_test(wpid.pid);
-      break;
+  case 0:
+    // copy watch_arg from user space
+    if (copy_from_user(&warg, (watch_arg *)ioctl_param, sizeof(warg))) {
+      return -EACCES;
+    }
+    printk(KERN_INFO "Watch_arg: task_id=%d, name=%s, ptr=%p, length_byte=%d, "
+                     "time_ns=%ld, threshold=%lld\n",
+           warg.task_id, warg.name, warg.ptr, warg.length_byte, warg.time_ns,
+           warg.threshold);
+    // start watching the variable
+    start_watch_variable(warg);
+    break;
+  case 1:
+    printk(KERN_INFO "variable_monitor ioctl_num 1\n");
+    ret = copy_from_user(&dump_param, (ioctl_dump_param *)ioctl_param,
+                         sizeof(ioctl_dump_param));
+    printk(KERN_INFO "dump_param: %p %lu %p\n", dump_param.user_ptr_len,
+           dump_param.user_buf_len, dump_param.user_buf);
+    if (!ret) {
+      // printk(KERN_INFO "ret\n");
+      ret = copy_to_user_variant_buffer(
+          &load_monitor_variant_buffer, dump_param.user_ptr_len,
+          dump_param.user_buf, dump_param.user_buf_len);
+      printk(KERN_INFO "ret %d, %lu\n", ret, dump_param.user_buf_len);
+    }
+    printk(KERN_INFO "copy_to_user_variant_buffer \n");
+    break;
+  case 2:
+    printk(KERN_INFO "variable_monitor ioctl_num 2\n");
+    ret = copy_from_user(&wpid, (ioctl_pid *)ioctl_param, sizeof(ioctl_pid));
+    diag_test(wpid.pid);
+    break;
+  default:
+    break;
   }
   return 0;
 }
diff --git a/source/module/monitor_kernel.h b/source/module/monitor_kernel.h
index be6f2ca..dce9913 100644
--- a/source/module/monitor_kernel.h
+++ b/source/module/monitor_kernel.h
@@ -4,14 +4,14 @@
 #include "monitor_trace.h"
 
 extern mm_tree mm_tree_struct;
-extern struct diag_variant_buffer load_monitor_variant_buffer;
+extern struct diag_variant_buffer load_monitor_variant_buffer; // global buffer
 
-int monitor_init(void);
-void monitor_exit(void);
+int monitor_init(void);  // monitor init
+void monitor_exit(void); // monitor exit
 
-int start_watch_variable(watch_arg warg);
-void clear_watch(pid_t pid);
+int start_watch_variable(watch_arg warg); // for open
+void clear_watch(pid_t pid);              // for release
 
-enum hrtimer_restart check_variable_cb(struct hrtimer *timer);  // callback
+enum hrtimer_restart check_variable_cb(struct hrtimer *timer); // hrtimer callback
 
-int diag_test(int nid);
\ No newline at end of file
+int diag_test(int nid); // for testing only
\ No newline at end of file
diff --git a/source/module/monitor_kernel_lib.c b/source/module/monitor_kernel_lib.c
index 4d9b6e8..b93dfbf 100644
--- a/source/module/monitor_kernel_lib.c
+++ b/source/module/monitor_kernel_lib.c
@@ -33,6 +33,12 @@ static unsigned char w_arg2k_w_arg(void *kptr, watch_arg warg,
   return 0;
 }
 
+/**
+ * @brief convert a kernel_watch_arg into a threshold record
+ *
+ * @param k_watch_arg
+ * @param threshold
+ */
 static void k_w_arg2threshold(kernel_watch_arg *k_watch_arg,
                               threshold *threshold) {
   threshold->task_id = k_watch_arg->task_id;
@@ -47,6 +53,12 @@ static void init_mm_tree(mm_tree *mm_tree) {
   spin_lock_init(&mm_tree->mm_tree_lock);
 }
 
+/**
+ * @brief init the global load_monitor_variant_buffer and mm_tree_struct
+ *
+ * @param buf_size
+ * @return int
+ */
 static int init_buffer(unsigned int buf_size) {
   init_mm_tree(&mm_tree_struct); // init mm_tree
   init_diag_variant_buffer(&load_monitor_variant_buffer, buf_size);
@@ -55,29 +67,32 @@ static int init_buffer(unsigned int buf_size) {
   return ret;
 }
 
+/**
+ * @brief collect task info: brief | user stack | kernel stack | proc chains |
+ * raw stack
+ *
+ * @param p
+ * @param tsk_info
+ */
 static void diag_tsk(struct task_struct *p, variable_monitor_task *tsk_info) {
   unsigned int nr_bt;
   // printk(KERN_INFO "diag_tsk\n");
-  diag_task_brief(p, &tsk_info->task); // task brief
-  // printk("1\n");
-  diag_task_user_stack(p, &tsk_info->user_stack); // user stack
-  // printk("2\n");
+  diag_task_brief(p, &tsk_info->task);                     // task brief
+  diag_task_user_stack(p, &tsk_info->user_stack);          // user stack
   nr_bt = diag_task_kern_stack(p, &tsk_info->kern_stack); // kernel stack
-  // int i = 0;
-  // printk("pid: %d, kernel stack.stack\n", p->pid);
-  // for (i = 0; i < nr_bt; i++) {
-  //   printk("%lx\n", tsk_info->kern_stack.stack[i]);
-  // }
-  // printk("pid: %d, stack_trace_print\n", p->pid);
-  // stack_trace_print(tsk_info->kern_stack.stack, nr_bt, 0); /* 打印栈 */
-
-  // printk("3\n");
   dump_proc_chains_argv(1, p, &mm_tree_struct,
                         &tsk_info->proc_chains); // proc chains
-  diag_task_raw_stack(p, &tsk_info->raw_stack); // raw stack
+  diag_task_raw_stack(p, &tsk_info->raw_stack);            // raw stack
 }
 
-static void push_tsk_info(variable_monitor_task *tsk_info,unsigned long *flags) {
+/**
+ * @brief push task info into the global buffer
+ *
+ * @param tsk_info
+ * @param flags
+ */
+static void push_tskinfo_2_buffer(variable_monitor_task *tsk_info,
+                                  unsigned long *flags) {
   // printk(KERN_INFO "push_tsk_info\n");
   diag_variant_buffer_spin_lock(&load_monitor_variant_buffer, *flags);
   diag_variant_buffer_reserve(&load_monitor_variant_buffer,
@@ -101,74 +116,84 @@ static void clear_all_watch(void) {
   memset(kernel_wtimer_list, 0, sizeof(kernel_wtimer_list));
 }
 
-void sample_task_work(struct work_struct *work){
-
-  kernel_watch_timer *k_watch_timer = container_of(work, kernel_watch_timer, wk);
+/**
+ * @brief collect task info; runs on the workqueue
+ *
+ * @param work
+ */
+void diag_task_info_work(struct work_struct *work) {
 
-  if (k_watch_timer->threshold_num <= 0) return;
+  kernel_watch_timer *k_watch_timer =
+      container_of(work, kernel_watch_timer, wk);
 
-  printk(KERN_INFO "sample_task_work\n");
+  if (k_watch_timer->threshold_num <= 0) // nothing exceeded its threshold
+    return;
 
-  struct task_struct *g, *p;  // g: task group; p: task
-  unsigned long flags;
-  unsigned long event_id = get_cycles();
-
-  static variable_monitor_task tsk_info = {0};
-  static variable_monitor_record vm_record = {0};
-  kernel_watch_arg *kwarg;
+  printk(KERN_INFO "diag_task_info_work\n");
 
-  vm_record.id = event_id;
-  vm_record.et_type = 0;  //! todo event type
-  vm_record.tv = ktime_get_real();
-  vm_record.threshold_num = k_watch_timer->threshold_num;
+  struct task_struct *g, *p; // g: task group; p: task
+  unsigned long flags;
+  unsigned long event_id = get_cycles();
 
-  int i;
-  for (i = 0; i < vm_record.threshold_num; i++) {
-    kwarg = &k_watch_timer->k_watch_args[k_watch_timer->threshold_buffer[i]];
-    k_w_arg2threshold(kwarg, &vm_record.threshold_record[i]);
+  static variable_monitor_task tsk_info = {0};
+  static variable_monitor_record vm_record = {0};
+  kernel_watch_arg *kwarg;
+
+  vm_record.id = event_id;
+  vm_record.et_type = 0; //! todo event type
+  vm_record.tv = ktime_get_real();
+  vm_record.threshold_num = k_watch_timer->threshold_num;
+
+  int i;
+  for (i = 0; i < vm_record.threshold_num; i++) {
+    kwarg = &k_watch_timer->k_watch_args[k_watch_timer->threshold_buffer[i]];
+    k_w_arg2threshold(kwarg, &vm_record.threshold_record[i]);
+  }
+  // !todo refine this output format
+  printk(KERN_INFO "-------------------------------------\n");
+  printk(KERN_INFO "-----------variable monitor----------\n");
+  printk(KERN_INFO "threshold exceeded: %lld\n", vm_record.tv);
+
+  for (i = 0; i < vm_record.threshold_num; i++) {
+    printk(KERN_INFO "\t: pid: %d, name: %s, ptr: %p, threshold:%lld\n",
+           vm_record.threshold_record[i].task_id,
+           vm_record.threshold_record[i]
+               .name, // Assuming name is a null-terminated string
+           vm_record.threshold_record[i].ptr,
+           vm_record.threshold_record[i].threshold);
+  }
+
+  rcu_read_lock();
+
+  diag_variant_buffer_spin_lock(&load_monitor_variant_buffer, flags);
+  diag_variant_buffer_reserve(&load_monitor_variant_buffer,
+                              sizeof(variable_monitor_record));
+  diag_variant_buffer_write_nolock(&load_monitor_variant_buffer, &vm_record,
+                                   sizeof(variable_monitor_record));
+  diag_variant_buffer_seal(&load_monitor_variant_buffer);
+  diag_variant_buffer_spin_unlock(&load_monitor_variant_buffer, flags);
+
+  rcu_read_unlock();
+  // collect per-task info
+  do_each_thread(g, p) {
+    if (p->__state == TASK_RUNNING || __task_contributes_to_load(p) ||
+        p->__state == TASK_IDLE || 1) {
+
+      get_task_struct(p); // refcount +1
+
+      tsk_info.et_type = 1; //! todo event type
+      tsk_info.id = event_id;
+      tsk_info.tv = vm_record.tv;
+      diag_tsk(p, &tsk_info);
+
+      put_task_struct(p); // refcount -1
+
+      push_tskinfo_2_buffer(&tsk_info, &flags); // push to buffer
     }
-    // !todo 调整输出
-    printk(KERN_INFO "超出阈值:%lld\n", vm_record.tv);
-
-    for (i = 0; i < vm_record.threshold_num; i++) {
-      printk(KERN_INFO "\t: pid: %d, name: %s, ptr: %p, threshold:%lld\n",
-             vm_record.threshold_record[i].task_id,
-             vm_record.threshold_record[i]
-                 .name, // Assuming name is a null-terminated string
-             vm_record.threshold_record[i].ptr,
-             vm_record.threshold_record[i].threshold);
-    }
-
-    rcu_read_lock();
-
-    diag_variant_buffer_spin_lock(&load_monitor_variant_buffer, flags);
-    diag_variant_buffer_reserve(&load_monitor_variant_buffer,
-                                sizeof(variable_monitor_record));
-    diag_variant_buffer_write_nolock(&load_monitor_variant_buffer, &vm_record,
-                                     sizeof(variable_monitor_record));
-    diag_variant_buffer_seal(&load_monitor_variant_buffer);
-    diag_variant_buffer_spin_unlock(&load_monitor_variant_buffer, flags);
-
-    rcu_read_unlock();
-
-    do_each_thread(g, p) {
-      if (p->__state == TASK_RUNNING || __task_contributes_to_load(p) ||
-          p->__state == TASK_IDLE || 1) {
-
-        get_task_struct(p); // count +1
-
-        tsk_info.et_type = 1; //! todo event type
-        tsk_info.id = event_id;
-        tsk_info.tv = vm_record.tv;
-        diag_tsk(p, &tsk_info);
-
-        put_task_struct(p); // count -1
-
-        push_tsk_info(&tsk_info, &flags); // push to buffer
-      }
-    }
-    while_each_thread(g, p);
-    return ;
+  }
+  while_each_thread(g, p);
+  printk("-------------------------------------\n");
+  return;
 }
 
+/**
+ * @brief all module function init.
orig_X | buffer | workqueue @@ -184,12 +209,6 @@ int monitor_init(void) { ret = init_buffer(50 * 1024 * 1024); // 50M if (ret) return -1; - // init workqueue - // int i; - // for (i=0; i < MAX_TIMER_NUM; i++) { - // kernel_watch_timer *kw_timer = &kernel_wtimer_list[i]; - // INIT_WORK(&kw_timer->wk, sample_task_work); - // } return 0; } @@ -233,7 +252,7 @@ int start_watch_variable(watch_arg warg) { w_arg2k_w_arg(kptr, warg, &k_watch_arg); timer = get_timer(warg.time_ns); // get a valuable timer - INIT_WORK(&timer->wk, sample_task_work); + INIT_WORK(&timer->wk, diag_task_info_work); printk(KERN_INFO "ptr transform kptr: %p\n", kptr); printk(KERN_INFO "timer: %p\n", timer); @@ -262,7 +281,6 @@ void clear_watch(pid_t pid) { start_all_hrTimer(); // restart timer } - /** * @brief main callback function * @@ -298,32 +316,7 @@ enum hrtimer_restart check_variable_cb(struct hrtimer *timer) { return HRTIMER_RESTART; // restart timer } -// static int diag_test(int nid); // for test -// static void test(struct task_struct *p, variable_monitor_task *tsk_info){ -// // unsigned int nr_bt; -// printk(KERN_INFO "diag_tsk\n"); -// diag_task_brief(p, &tsk_info->task); // task brief -// // printk("1\n"); -// diag_task_user_stack(p, &tsk_info->user_stack); // user stack -// diag_task_kern_stack(p, &tsk_info->kern_stack); // kernel stack -// dump_proc_chains_argv(1, p, &mm_tree_struct, -// &tsk_info->proc_chains); // proc chains -// diag_task_raw_stack(p, &tsk_info->raw_stack); // raw stack -// printk(KERN_INFO "diag_tsk finish\n"); -// } - -// static void test2(variable_monitor_task *tsk_info, unsigned long flags){ -// printk(KERN_INFO "test2\n"); -// diag_variant_buffer_spin_lock(&load_monitor_variant_buffer, flags); -// diag_variant_buffer_reserve(&load_monitor_variant_buffer,sizeof(variable_monitor_task)); -// diag_variant_buffer_write_nolock(&load_monitor_variant_buffer, tsk_info, -// sizeof(variable_monitor_task)); -// diag_variant_buffer_seal(&load_monitor_variant_buffer); -// diag_variant_buffer_spin_unlock(&load_monitor_variant_buffer, flags); -// printk(KERN_INFO "test2 finish\n"); -// } - -int diag_test(int nid){ +int diag_test(int nid) { // static struct task_struct *tsk; // static struct task_struct *leader; // static variable_monitor_task tsk_info; @@ -331,24 +324,24 @@ int diag_test(int nid){ // int ret; // unsigned long flags; - // pid_t id = (pid_t)nid; + // pid_t id = (pid_t)nid; // rcu_read_lock(); // tsk = NULL; - // if (orig_find_task_by_vpid) - // tsk = orig_find_task_by_vpid(id); - // if (!tsk) { - // ret = -EINVAL; - // rcu_read_unlock(); - // return ret; - // } + // if (orig_find_task_by_vpid) + // tsk = orig_find_task_by_vpid(id); + // if (!tsk) { + // ret = -EINVAL; + // rcu_read_unlock(); + // return ret; + // } - // leader = tsk->group_leader; - // if (leader == NULL || leader->exit_state == EXIT_ZOMBIE){ - // ret = -EINVAL; - // rcu_read_unlock(); - // return ret; - // } + // leader = tsk->group_leader; + // if (leader == NULL || leader->exit_state == EXIT_ZOMBIE){ + // ret = -EINVAL; + // rcu_read_unlock(); + // return ret; + // } // get_task_struct(tsk); // rcu_read_unlock(); @@ -371,56 +364,56 @@ int diag_test(int nid){ // diag_variant_buffer_spin_unlock(&load_monitor_variant_buffer, flags); // printk(KERN_INFO "5\n"); - struct task_struct *g, *p; // g: task group; p: task - unsigned long flags; - unsigned long event_id = get_cycles(); + struct task_struct *g, *p; // g: task group; p: task + unsigned long flags; + unsigned long event_id = get_cycles(); - static 
variable_monitor_task tsk_info = {0}; - static variable_monitor_record vm_record = {0}; + static variable_monitor_task tsk_info = {0}; + static variable_monitor_record vm_record = {0}; - // vm_record.id = event_id; - // vm_record.et_type = 0; //! todo event type - vm_record.tv = ktime_get_real(); - // vm_record.threshold_num = j; + // vm_record.id = event_id; + // vm_record.et_type = 0; //! todo event type + vm_record.tv = ktime_get_real(); + // vm_record.threshold_num = j; - // printk("-------------------------------------\n"); - // printk("-------------watch monitor-----------\n"); - // printk("Threshold reached:\n"); - // for (i = 0; i < j; i++) { - // kwarg = &k_watch_timer->k_watch_args[buffer[i]]; - // k_w_arg2threshold(kwarg, &vm_record.threshold_record[i]); - // } - // rcu_read_lock(); - - // diag_variant_buffer_spin_lock(&load_monitor_variant_buffer, flags); - // diag_variant_buffer_reserve(&load_monitor_variant_buffer, - // sizeof(variable_monitor_record)); - // diag_variant_buffer_write_nolock(&load_monitor_variant_buffer, &vm_record, - // sizeof(variable_monitor_record)); - // diag_variant_buffer_seal(&load_monitor_variant_buffer); - // diag_variant_buffer_spin_unlock(&load_monitor_variant_buffer, flags); + // printk("-------------------------------------\n"); + // printk("-------------watch monitor-----------\n"); + // printk("Threshold reached:\n"); + // for (i = 0; i < j; i++) { + // kwarg = &k_watch_timer->k_watch_args[buffer[i]]; + // k_w_arg2threshold(kwarg, &vm_record.threshold_record[i]); + // } + // rcu_read_lock(); - rcu_read_unlock(); + // diag_variant_buffer_spin_lock(&load_monitor_variant_buffer, flags); + // diag_variant_buffer_reserve(&load_monitor_variant_buffer, + // sizeof(variable_monitor_record)); + // diag_variant_buffer_write_nolock(&load_monitor_variant_buffer, &vm_record, + // sizeof(variable_monitor_record)); + // diag_variant_buffer_seal(&load_monitor_variant_buffer); + // diag_variant_buffer_spin_unlock(&load_monitor_variant_buffer, flags); - do_each_thread(g, p) { - if (p->__state == TASK_RUNNING || __task_contributes_to_load(p) || - p->__state == TASK_IDLE || 1) { + rcu_read_unlock(); - get_task_struct(p); + do_each_thread(g, p) { + if (p->__state == TASK_RUNNING || __task_contributes_to_load(p) || + p->__state == TASK_IDLE || 1) { - tsk_info.et_type = 1; //! todo event type - tsk_info.id = event_id; - tsk_info.tv = vm_record.tv; + get_task_struct(p); - diag_tsk(p, &tsk_info); - - put_task_struct(p); + tsk_info.et_type = 1; //! 
todo event type + tsk_info.id = event_id; + tsk_info.tv = vm_record.tv; - push_tsk_info(&tsk_info, &flags); - } + diag_tsk(p, &tsk_info); + + put_task_struct(p); + + push_tskinfo_2_buffer(&tsk_info, &flags); } - while_each_thread(g, p); - printk("-------------------------------------\n"); - + } + while_each_thread(g, p); + printk("-------------------------------------\n"); + return 0; } \ No newline at end of file diff --git a/source/module/monitor_mem.c b/source/module/monitor_mem.c index f6d59eb..17375a0 100644 --- a/source/module/monitor_mem.c +++ b/source/module/monitor_mem.c @@ -1,9 +1,9 @@ #include "monitor_mem.h" -#include // for FOLL_FORCE -#include // pid pid_task -#include /* for kmalloc */ +#include // for FOLL_FORCE +#include // pid pid_task #include // get_task_mm +#include /* for kmalloc */ /// @brief transfer user space address to kernel space address /// change static global "kaddr" and "page" value diff --git a/source/module/monitor_mem.h b/source/module/monitor_mem.h index 20fdff2..391c609 100644 --- a/source/module/monitor_mem.h +++ b/source/module/monitor_mem.h @@ -4,10 +4,10 @@ // #include typedef struct { - int __user *user_ptr_len; - size_t __user user_buf_len; - void __user *user_buf; -} ioctl_dump_param; + int __user *user_ptr_len; + size_t __user user_buf_len; + void __user *user_buf; +} ioctl_dump_param; // for ioctl_num 1; typedef struct { pid_t task_id; // current process id diff --git a/source/module/monitor_trace.c b/source/module/monitor_trace.c index 7ef7fdf..fe44a20 100644 --- a/source/module/monitor_trace.c +++ b/source/module/monitor_trace.c @@ -46,6 +46,13 @@ static inline int orig_diag_cgroup_name(struct cgroup *cgrp, char *buf, } } +/** + * @brief find mm_info by mm + * + * @param mm_tree + * @param mm + * @return mm_info* + */ static inline mm_info *find_mm_info(mm_tree *mm_tree, struct mm_struct *mm) { mm_info *info; if (mm == NULL) @@ -76,6 +83,13 @@ static void diag_cgroup_name(struct task_struct *tsk, char *buf, __diag_cgroup_name(tsk, buf, count, cgroup); } +/** + * @brief copy stack frame by fp + * + * @param fp + * @param frame + * @return int + */ static int copy_stack_frame(const void __user *fp, struct stack_frame_user *frame) { int ret; @@ -97,17 +111,23 @@ static int copy_stack_frame_remote(struct task_struct *tsk, mm = get_task_mm(tsk); if (!mm) { - printk("copy_stack_frame_remote %d get_task_mm fail\n", tsk->pid); + printk(KERN_INFO "copy_stack_frame_remote %d get_task_mm fail\n", tsk->pid); return 0; } ret = orig_access_remote_vm(mm, (unsigned long)fp, frame, sizeof(*frame), 0); - // printk("copy_stack_frame_remote %d ret:%d\n", tsk->pid, ret); + // printk(KERN_INFO "copy_stack_frame_remote %d ret:%d\n", tsk->pid, ret); mmput(mm); return ret; } +/** + * @brief save stack trace | not current task + * + * @param tsk + * @param trace + */ static inline void save_stack_trace_user_remote(struct task_struct *tsk, struct stack_trace *trace) { const struct pt_regs *regs = task_pt_regs(tsk); @@ -115,12 +135,14 @@ static inline void save_stack_trace_user_remote(struct task_struct *tsk, int count = 0; if (in_atomic()) { - printk("save_stack_trace_user_remote %d in_atomic\n", tsk->pid); + printk(KERN_INFO "save_stack_trace_user_remote %d: task in_atomic\n", + tsk->pid); return; } if (irqs_disabled()) { - printk("save_stack_trace_user_remote %d irqs_disabled\n", tsk->pid); + printk(KERN_INFO "save_stack_trace_user_remote %d: task in irqs_disabled\n", + tsk->pid); return; } @@ -134,30 +156,34 @@ static inline void save_stack_trace_user_remote(struct 
task_struct *tsk, frame.ret_addr = 0; if (!copy_stack_frame_remote(tsk, fp, &frame)) { - // printk("save_stack_trace_user_remote %d copy_stack_frame_remote fail\n", + // printk(KERN_INFO "save_stack_trace_user_remote %d + // copy_stack_frame_remote fail\n", // tsk->pid); break; } if ((unsigned long)fp < regs->sp) { - // printk("save_stack_trace_user_remote %d fp < sp count:%d\n", tsk->pid, + // printk(KERN_INFO "save_stack_trace_user_remote %d fp < sp count:%d\n", + // tsk->pid, // count); break; // 如果fp小于sp,说明已经到了栈底,退出 } // 如果返回地址不为0,说明是一个有效的栈帧,保存返回地址 if (frame.ret_addr) { trace->entries[trace->nr_entries++] = frame.ret_addr; - // printk("save_stack_trace_user_remote %d ret_addr:%lx\n", tsk->pid, - // frame.ret_addr); + // printk(KERN_INFO "save_stack_trace_user_remote %d ret_addr:%lx\n", + // tsk->pid, + // frame.ret_addr); } else { - // printk("save_stack_trace_user_remote %d no ret_addr", tsk->pid); + // printk(KERN_INFO "save_stack_trace_user_remote %d no ret_addr", + // tsk->pid); break; - // continue; } // 如果fp指向自己,说明已经到了栈底,退出 if (fp == frame.next_fp) { - // printk("save_stack_trace_user_remote %d fp == next_fp", tsk->pid); + // printk(KERN_INFO "save_stack_trace_user_remote %d fp == next_fp", + // tsk->pid); break; } fp = frame.next_fp; // 否则,继续向下遍历 @@ -214,6 +240,11 @@ static void perfect_save_stack_trace_user(struct stack_trace *trace) { trace->entries[trace->nr_entries++] = ULONG_MAX; } +/** + * @brief save stack trace | current task + * + * @param backtrace + */ static void diagnose_save_stack_trace_user(unsigned long *backtrace) { struct stack_trace trace; @@ -224,6 +255,13 @@ static void diagnose_save_stack_trace_user(unsigned long *backtrace) { perfect_save_stack_trace_user(&trace); } +/** + * @brief save stack trace | not current task + * + * @param tsk + * @param backtrace + */ + static void diagnose_save_stack_trace_user_remote(struct task_struct *tsk, unsigned long *backtrace) { struct stack_trace trace; @@ -237,18 +275,11 @@ static void diagnose_save_stack_trace_user_remote(struct task_struct *tsk, * Trace user stack if we are not a kernel thread */ if (tsk->mm) { - // printk("save_stack_trace_user_remote %d mm\n", tsk->pid); + // printk(KERN_INFO "save_stack_trace_user_remote %d mm\n", tsk->pid); save_stack_trace_user_remote(tsk, &trace); } if (trace.nr_entries < trace.max_entries) trace.entries[trace.nr_entries++] = ULONG_MAX; - - // printk("save_stack_trace_user_remote %d, stack: [", tsk->pid); - // int i = 0; - // for (i = 0; i < BACKTRACE_DEPTH; i++) { - // printk("%lx, ", backtrace[i]); - // } - // printk("]\n"); } static int diagnose_task_raw_stack_remote(struct task_struct *tsk, void *to, @@ -257,12 +288,12 @@ static int diagnose_task_raw_stack_remote(struct task_struct *tsk, void *to, struct mm_struct *mm; if (in_atomic()) { - printk("task_raw_stack_remote %d in_atomic\n", tsk->pid); + printk(KERN_INFO "task_raw_stack_remote %d in_atomic\n", tsk->pid); return 0; } if (irqs_disabled()) { - printk("task_raw_stack_remote %d irqs_disabled\n", tsk->pid); + printk(KERN_INFO "task_raw_stack_remote %d irqs_disabled\n", tsk->pid); return 0; } @@ -277,7 +308,8 @@ static int diagnose_task_raw_stack_remote(struct task_struct *tsk, void *to, ret = orig_access_remote_vm(mm, (unsigned long)from, to, n, 0); mmput(mm); - // printk("task_raw_stack_remote %d access_remote_vm ret: %d\n", tsk->pid, ret); + // printk(KERN_INFO "task_raw_stack_remote %d access_remote_vm ret: %d\n", + // tsk->pid, ret); return ret < 0 ? 
ret : 0; } @@ -384,19 +416,34 @@ void diag_task_user_stack(struct task_struct *tsk, user_stack_detail *detail) { detail->bp = bp; if (tsk == current) { - // printk("diag_task_user_stack %d current\n", tsk->pid); + // printk(KERN_INFO "diag_task_user_stack %d current\n", tsk->pid); diagnose_save_stack_trace_user(detail->stack); } else { - // printk("diag_task_user_stack %d no current\n", tsk->pid); + // printk(KERN_INFO "diag_task_user_stack %d no current\n", tsk->pid); diagnose_save_stack_trace_user_remote(tsk, detail->stack); } } +/** + * @brief diag task kernel stack | -> to orig_stack_trace_save_tsk + * + * @param tsk + * @param detail + * @return unsigned int + */ unsigned int diag_task_kern_stack(struct task_struct *tsk, kern_stack_detail *detail) { return orig_stack_trace_save_tsk(tsk, detail->stack, BACKTRACE_DEPTH, 0); } +/** + * @brief diag task proc chains + * + * @param style + * @param tsk + * @param mm_tree + * @param detail + */ void dump_proc_chains_argv(int style, struct task_struct *tsk, mm_tree *mm_tree, proc_chains_detail *detail) { struct task_struct *walker; @@ -438,11 +485,6 @@ void dump_proc_chains_argv(int style, struct task_struct *tsk, mm_tree *mm_tree, detail->full_argv[cnt] = 0; } detail->tgid[cnt] = walker->pid; - // if ((detail->tgid[cnt] != 0) | (detail->full_argv[cnt] != 0)) { - // printk("pid: %d,full_argv: %d, chains: %s, cnt:%d\n", - // detail->tgid[cnt], - // detail->full_argv[cnt], detail->chains[cnt], cnt); - // } walker = rcu_dereference(walker->real_parent); cnt++; if (cnt >= PROCESS_CHAINS_COUNT) @@ -453,9 +495,9 @@ void dump_proc_chains_argv(int style, struct task_struct *tsk, mm_tree *mm_tree, /** * @brief copy task raw stack - * - * @param tsk - * @param detail + * + * @param tsk + * @param detail */ void diag_task_raw_stack(struct task_struct *tsk, raw_stack_detail *detail) { struct pt_regs *regs; @@ -493,7 +535,8 @@ void diag_task_raw_stack(struct task_struct *tsk, raw_stack_detail *detail) { ret = diagnose_task_raw_stack_remote( tsk, stack, (void __user *)sp + detail->stack_size, 1024); } - // printk("diag_task_raw_stack %d i:%d ret:%d\n", tsk->pid, i, ret); + // printk(KERN_INFO "diag_task_raw_stack %d i:%d ret:%d\n", tsk->pid, i, + // ret); if (ret) break; else @@ -501,71 +544,4 @@ void diag_task_raw_stack(struct task_struct *tsk, raw_stack_detail *detail) { stack += 1024; } -} - -/// @brief print all task stack -/// @param -// static void print_task_stack(void) { -// struct task_struct *g, *p; // g: task group; p: task -// unsigned long backtrace[BACKTRACE_DEPTH]; // save stack -// unsigned int nr_bt; // stack depth -// unsigned long long current_time; // last time -// current_time = ktime_get_real(); -// printk("Timestamp (ns): %lld\n", current_time); -// printk("Recent Load: %lu.%02lu, %lu.%02lu, %lu.%02lu\n", // recent load -// LOAD_INT(avenrun[0]), LOAD_FRAC(avenrun[0]), LOAD_INT(avenrun[1]), -// LOAD_FRAC(avenrun[1]), LOAD_INT(avenrun[2]), LOAD_FRAC(avenrun[2])); -// rcu_read_lock(); // lock run queue -// // printk("Running task\n"); -// do_each_thread(g, p) { -// if (p->__state == TASK_RUNNING || __task_contributes_to_load(p) || -// p->__state == TASK_IDLE) { -// printk("task: %s, pid %d, state %d\n", p->comm, p->pid, -// p->__state); //! 
todo -// nr_bt = orig_stack_trace_save_tsk(p, backtrace, BACKTRACE_DEPTH, 0); -// stack_trace_print(backtrace, nr_bt, 0); // print -// } -// } -// while_each_thread(g, p); -// rcu_read_unlock(); // unlock run queue -// } - -// void diag_printf_kern_stack(kern_stack_detail *kern_stack, int reverse) { -// int i; -// symbol sym; - -// printf(" 内核态堆栈:\n"); -// if (reverse) { -// for (i = BACKTRACE_DEPTH - 1; i >= 0; i--) { -// if (kern_stack->stack[i] == (size_t)-1 || kern_stack->stack[i] == 0) { -// continue; -// } -// sym.reset(kern_stack->stack[i]); -// if (g_symbol_parser.find_kernel_symbol(sym)) { -// printf("#@ 0x%lx %s ([kernel.kallsyms])\n", -// kern_stack->stack[i], -// sym.name.c_str()); -// } else { -// printf("#@ 0x%lx %s\n", kern_stack->stack[i], "UNKNOWN"); -// } -// } -// } else { -// for (i = 0; i < BACKTRACE_DEPTH; i++) { -// if (kern_stack->stack[i] == (size_t)-1 || kern_stack->stack[i] == 0) { -// break; -// } -// sym.reset(kern_stack->stack[i]); -// if (g_symbol_parser.find_kernel_symbol(sym)) { -// printf("#@ 0x%lx %s ([kernel.kallsyms])\n", -// kern_stack->stack[i], -// sym.name.c_str()); -// } else { -// printf("#@ 0x%lx %s\n", kern_stack->stack[i], "UNKNOWN"); -// } -// } -// } -// } - -// void diag_printf_kern_stack(struct diag_kern_stack_detail *kern_stack) { -// diag_printf_kern_stack(kern_stack, 0); -// } +} \ No newline at end of file diff --git a/source/module/monitor_trace.h b/source/module/monitor_trace.h index a3932a9..a6cc21e 100644 --- a/source/module/monitor_trace.h +++ b/source/module/monitor_trace.h @@ -56,11 +56,11 @@ typedef struct { */ unsigned long user_mode; char comm[TASK_COMM_LEN]; -} task_detail; +} task_detail; // task brief typedef struct { unsigned long stack[BACKTRACE_DEPTH]; -} kern_stack_detail; +} kern_stack_detail; // kernel stack typedef struct { struct pt_regs regs; @@ -68,15 +68,15 @@ typedef struct { unsigned long bp; unsigned long sp; unsigned long stack[BACKTRACE_DEPTH]; -} user_stack_detail; +} user_stack_detail; // user stack typedef struct { struct pt_regs regs; unsigned long ip; - unsigned long bp; - unsigned long sp; - unsigned long stack_size; - unsigned long stack[DIAG_USER_STACK_SIZE / sizeof(unsigned long)]; + unsigned long bp; + unsigned long sp; + unsigned long stack_size; + unsigned long stack[DIAG_USER_STACK_SIZE / sizeof(unsigned long)]; } raw_stack_detail; typedef struct { @@ -95,7 +95,7 @@ typedef struct { kern_stack_detail kern_stack; // kernel stack proc_chains_detail proc_chains; // process chains argv raw_stack_detail raw_stack; -} variable_monitor_task; +} variable_monitor_task; // main struct typedef struct { struct radix_tree_root mm_tree; @@ -109,14 +109,15 @@ void diag_task_brief(struct task_struct *tsk, task_detail *detail); // get task brief void diag_task_user_stack(struct task_struct *tsk, user_stack_detail *detail); // get task user stack -void diag_task_raw_stack(struct task_struct *tsk, raw_stack_detail *detail); // get task raw stack -unsigned int diag_task_kern_stack(struct task_struct *tsk, - kern_stack_detail *detail); // get task kernel stack +void diag_task_raw_stack(struct task_struct *tsk, + raw_stack_detail *detail); // get task raw stack +unsigned int +diag_task_kern_stack(struct task_struct *tsk, + kern_stack_detail *detail); // get task kernel stack void dump_proc_chains_argv( int style, struct task_struct *tsk, mm_tree *mm_tree, proc_chains_detail *detail); // get process chains argv - // print // void diag_printf_kern_stack(kern_stack_detail *kern_stack); // void 
diag_printf_kern_stack(kern_stack_detail *kern_stack, int reverse);