module: code clean

This commit is contained in:
zy
2023-11-27 15:16:35 +08:00
parent a04078c068
commit 49fb0b5f2e
8 changed files with 310 additions and 338 deletions

View File

@@ -33,6 +33,12 @@ static unsigned char w_arg2k_w_arg(void *kptr, watch_arg warg,
return 0;
}
/**
* @brief kernel_watch_arg to threshold
*
* @param k_watch_arg
* @param threshold
*/
static void k_w_arg2threshold(kernel_watch_arg *k_watch_arg,
threshold *threshold) {
threshold->task_id = k_watch_arg->task_id;
@@ -47,6 +53,12 @@ static void init_mm_tree(mm_tree *mm_tree) {
spin_lock_init(&mm_tree->mm_tree_lock);
}
/**
* @brief init global variable load_monitor_variant_buffer
*
* @param buf_size
* @return int
*/
static int init_buffer(unsigned int buf_size) {
init_mm_tree(&mm_tree_struct); // init mm_tree
init_diag_variant_buffer(&load_monitor_variant_buffer, buf_size);
@@ -55,29 +67,32 @@ static int init_buffer(unsigned int buf_size) {
return ret;
}
/**
* @brief diag task info | brief | user stack | kernel stack | proc chains | raw
* stack
*
* @param p
* @param tsk_info
*/
/**
 * @brief Collect diagnostic info for one task: brief | user stack |
 *        kernel stack | proc chains | raw stack.
 *
 * Fix: the previous text sampled the task brief, user stack and raw stack
 * twice (duplicated calls) and carried a block of commented-out debug
 * printks plus an unused `nr_bt` local; each stage now runs exactly once.
 *
 * @param p        task to sample (caller must hold a reference)
 * @param tsk_info output record, filled in place
 */
static void diag_tsk(struct task_struct *p, variable_monitor_task *tsk_info) {
  diag_task_brief(p, &tsk_info->task);            // task brief
  diag_task_user_stack(p, &tsk_info->user_stack); // user stack
  diag_task_kern_stack(p, &tsk_info->kern_stack); // kernel stack
  dump_proc_chains_argv(1, p, &mm_tree_struct,
                        &tsk_info->proc_chains);  // proc chains
  diag_task_raw_stack(p, &tsk_info->raw_stack);   // raw stack
}
static void push_tsk_info(variable_monitor_task *tsk_info,unsigned long *flags) {
/**
* @brief push task info to global buffer
*
* @param tsk_info
* @param flags
*/
static void push_tskinfo_2_buffer(variable_monitor_task *tsk_info,
unsigned long *flags) {
// printk(KERN_INFO "push_tsk_info\n");
diag_variant_buffer_spin_lock(&load_monitor_variant_buffer, *flags);
diag_variant_buffer_reserve(&load_monitor_variant_buffer,
@@ -101,74 +116,84 @@ static void clear_all_watch(void) {
memset(kernel_wtimer_list, 0, sizeof(kernel_wtimer_list));
}
void sample_task_work(struct work_struct *work){
kernel_watch_timer *k_watch_timer = container_of(work, kernel_watch_timer, wk);
/**
* @brief diag task info, for work queue
*
* @param work
*/
void diag_task_info_work(struct work_struct *work) {
if (k_watch_timer->threshold_num <= 0) return;
kernel_watch_timer *k_watch_timer =
container_of(work, kernel_watch_timer, wk);
printk(KERN_INFO "sample_task_work\n");
if (k_watch_timer->threshold_num <= 0) // if no threshold reached
return;
struct task_struct *g, *p; // g: task group; p: task
unsigned long flags;
unsigned long event_id = get_cycles();
static variable_monitor_task tsk_info = {0};
static variable_monitor_record vm_record = {0};
kernel_watch_arg *kwarg;
printk(KERN_INFO "diag_task_info_work\n");
vm_record.id = event_id;
vm_record.et_type = 0; //! todo event type
vm_record.tv = ktime_get_real();
vm_record.threshold_num = k_watch_timer->threshold_num;
struct task_struct *g, *p; // g: task group; p: task
unsigned long flags;
unsigned long event_id = get_cycles();
int i;
for (i = 0; i < vm_record.threshold_num; i++) {
kwarg = &k_watch_timer->k_watch_args[k_watch_timer->threshold_buffer[i]];
k_w_arg2threshold(kwarg, &vm_record.threshold_record[i]);
static variable_monitor_task tsk_info = {0};
static variable_monitor_record vm_record = {0};
kernel_watch_arg *kwarg;
vm_record.id = event_id;
vm_record.et_type = 0; //! todo event type
vm_record.tv = ktime_get_real();
vm_record.threshold_num = k_watch_timer->threshold_num;
int i;
for (i = 0; i < vm_record.threshold_num; i++) {
kwarg = &k_watch_timer->k_watch_args[k_watch_timer->threshold_buffer[i]];
k_w_arg2threshold(kwarg, &vm_record.threshold_record[i]);
}
// !todo 调整输出
printk(KERN_INFO "-------------------------------------\n");
printk(KERN_INFO "-----------variable monitor----------\n");
printk(KERN_INFO "超出阈值:%lld\n", vm_record.tv);
for (i = 0; i < vm_record.threshold_num; i++) {
printk(KERN_INFO "\t: pid: %d, name: %s, ptr: %p, threshold:%lld\n",
vm_record.threshold_record[i].task_id,
vm_record.threshold_record[i]
.name, // Assuming name is a null-terminated string
vm_record.threshold_record[i].ptr,
vm_record.threshold_record[i].threshold);
}
rcu_read_lock();
diag_variant_buffer_spin_lock(&load_monitor_variant_buffer, flags);
diag_variant_buffer_reserve(&load_monitor_variant_buffer,
sizeof(variable_monitor_record));
diag_variant_buffer_write_nolock(&load_monitor_variant_buffer, &vm_record,
sizeof(variable_monitor_record));
diag_variant_buffer_seal(&load_monitor_variant_buffer);
diag_variant_buffer_spin_unlock(&load_monitor_variant_buffer, flags);
rcu_read_unlock();
// for task info
do_each_thread(g, p) {
if (p->__state == TASK_RUNNING || __task_contributes_to_load(p) ||
p->__state == TASK_IDLE || 1) {
get_task_struct(p); // count +1
tsk_info.et_type = 1; //! todo event type
tsk_info.id = event_id;
tsk_info.tv = vm_record.tv;
diag_tsk(p, &tsk_info);
put_task_struct(p); // count -1
push_tskinfo_2_buffer(&tsk_info, &flags); // push to buffer
}
// !todo 调整输出
printk(KERN_INFO "超出阈值:%lld\n", vm_record.tv);
for (i = 0; i < vm_record.threshold_num; i++) {
printk(KERN_INFO "\t: pid: %d, name: %s, ptr: %p, threshold:%lld\n",
vm_record.threshold_record[i].task_id,
vm_record.threshold_record[i]
.name, // Assuming name is a null-terminated string
vm_record.threshold_record[i].ptr,
vm_record.threshold_record[i].threshold);
}
rcu_read_lock();
diag_variant_buffer_spin_lock(&load_monitor_variant_buffer, flags);
diag_variant_buffer_reserve(&load_monitor_variant_buffer,
sizeof(variable_monitor_record));
diag_variant_buffer_write_nolock(&load_monitor_variant_buffer, &vm_record,
sizeof(variable_monitor_record));
diag_variant_buffer_seal(&load_monitor_variant_buffer);
diag_variant_buffer_spin_unlock(&load_monitor_variant_buffer, flags);
rcu_read_unlock();
do_each_thread(g, p) {
if (p->__state == TASK_RUNNING || __task_contributes_to_load(p) ||
p->__state == TASK_IDLE || 1) {
get_task_struct(p); // count +1
tsk_info.et_type = 1; //! todo event type
tsk_info.id = event_id;
tsk_info.tv = vm_record.tv;
diag_tsk(p, &tsk_info);
put_task_struct(p); // count -1
push_tsk_info(&tsk_info, &flags); // push to buffer
}
}
while_each_thread(g, p);
return ;
}
while_each_thread(g, p);
printk("-------------------------------------\n");
return;
}
/**
* @brief all module function init. orig_X | buffer | workqueue
@@ -184,12 +209,6 @@ int monitor_init(void) {
ret = init_buffer(50 * 1024 * 1024); // 50M
if (ret)
return -1;
// init workqueue
// int i;
// for (i=0; i < MAX_TIMER_NUM; i++) {
// kernel_watch_timer *kw_timer = &kernel_wtimer_list[i];
// INIT_WORK(&kw_timer->wk, sample_task_work);
// }
return 0;
}
@@ -233,7 +252,7 @@ int start_watch_variable(watch_arg warg) {
w_arg2k_w_arg(kptr, warg, &k_watch_arg);
timer = get_timer(warg.time_ns); // get a valuable timer
INIT_WORK(&timer->wk, sample_task_work);
INIT_WORK(&timer->wk, diag_task_info_work);
printk(KERN_INFO "ptr transform kptr: %p\n", kptr);
printk(KERN_INFO "timer: %p\n", timer);
@@ -262,7 +281,6 @@ void clear_watch(pid_t pid) {
start_all_hrTimer(); // restart timer
}
/**
* @brief main callback function
*
@@ -298,32 +316,7 @@ enum hrtimer_restart check_variable_cb(struct hrtimer *timer) {
return HRTIMER_RESTART; // restart timer
}
// static int diag_test(int nid); // for test
// static void test(struct task_struct *p, variable_monitor_task *tsk_info){
// // unsigned int nr_bt;
// printk(KERN_INFO "diag_tsk\n");
// diag_task_brief(p, &tsk_info->task); // task brief
// // printk("1\n");
// diag_task_user_stack(p, &tsk_info->user_stack); // user stack
// diag_task_kern_stack(p, &tsk_info->kern_stack); // kernel stack
// dump_proc_chains_argv(1, p, &mm_tree_struct,
// &tsk_info->proc_chains); // proc chains
// diag_task_raw_stack(p, &tsk_info->raw_stack); // raw stack
// printk(KERN_INFO "diag_tsk finish\n");
// }
// static void test2(variable_monitor_task *tsk_info, unsigned long flags){
// printk(KERN_INFO "test2\n");
// diag_variant_buffer_spin_lock(&load_monitor_variant_buffer, flags);
// diag_variant_buffer_reserve(&load_monitor_variant_buffer,sizeof(variable_monitor_task));
// diag_variant_buffer_write_nolock(&load_monitor_variant_buffer, tsk_info,
// sizeof(variable_monitor_task));
// diag_variant_buffer_seal(&load_monitor_variant_buffer);
// diag_variant_buffer_spin_unlock(&load_monitor_variant_buffer, flags);
// printk(KERN_INFO "test2 finish\n");
// }
int diag_test(int nid){
int diag_test(int nid) {
// static struct task_struct *tsk;
// static struct task_struct *leader;
// static variable_monitor_task tsk_info;
@@ -331,24 +324,24 @@ int diag_test(int nid){
// int ret;
// unsigned long flags;
// pid_t id = (pid_t)nid;
// pid_t id = (pid_t)nid;
// rcu_read_lock();
// tsk = NULL;
// if (orig_find_task_by_vpid)
// tsk = orig_find_task_by_vpid(id);
// if (!tsk) {
// ret = -EINVAL;
// rcu_read_unlock();
// return ret;
// }
// if (orig_find_task_by_vpid)
// tsk = orig_find_task_by_vpid(id);
// if (!tsk) {
// ret = -EINVAL;
// rcu_read_unlock();
// return ret;
// }
// leader = tsk->group_leader;
// if (leader == NULL || leader->exit_state == EXIT_ZOMBIE){
// ret = -EINVAL;
// rcu_read_unlock();
// return ret;
// }
// leader = tsk->group_leader;
// if (leader == NULL || leader->exit_state == EXIT_ZOMBIE){
// ret = -EINVAL;
// rcu_read_unlock();
// return ret;
// }
// get_task_struct(tsk);
// rcu_read_unlock();
@@ -371,56 +364,56 @@ int diag_test(int nid){
// diag_variant_buffer_spin_unlock(&load_monitor_variant_buffer, flags);
// printk(KERN_INFO "5\n");
struct task_struct *g, *p; // g: task group; p: task
unsigned long flags;
unsigned long event_id = get_cycles();
struct task_struct *g, *p; // g: task group; p: task
unsigned long flags;
unsigned long event_id = get_cycles();
static variable_monitor_task tsk_info = {0};
static variable_monitor_record vm_record = {0};
static variable_monitor_task tsk_info = {0};
static variable_monitor_record vm_record = {0};
// vm_record.id = event_id;
// vm_record.et_type = 0; //! todo event type
vm_record.tv = ktime_get_real();
// vm_record.threshold_num = j;
// vm_record.id = event_id;
// vm_record.et_type = 0; //! todo event type
vm_record.tv = ktime_get_real();
// vm_record.threshold_num = j;
// printk("-------------------------------------\n");
// printk("-------------watch monitor-----------\n");
// printk("Threshold reached:\n");
// for (i = 0; i < j; i++) {
// kwarg = &k_watch_timer->k_watch_args[buffer[i]];
// k_w_arg2threshold(kwarg, &vm_record.threshold_record[i]);
// }
// rcu_read_lock();
// diag_variant_buffer_spin_lock(&load_monitor_variant_buffer, flags);
// diag_variant_buffer_reserve(&load_monitor_variant_buffer,
// sizeof(variable_monitor_record));
// diag_variant_buffer_write_nolock(&load_monitor_variant_buffer, &vm_record,
// sizeof(variable_monitor_record));
// diag_variant_buffer_seal(&load_monitor_variant_buffer);
// diag_variant_buffer_spin_unlock(&load_monitor_variant_buffer, flags);
// printk("-------------------------------------\n");
// printk("-------------watch monitor-----------\n");
// printk("Threshold reached:\n");
// for (i = 0; i < j; i++) {
// kwarg = &k_watch_timer->k_watch_args[buffer[i]];
// k_w_arg2threshold(kwarg, &vm_record.threshold_record[i]);
// }
// rcu_read_lock();
rcu_read_unlock();
// diag_variant_buffer_spin_lock(&load_monitor_variant_buffer, flags);
// diag_variant_buffer_reserve(&load_monitor_variant_buffer,
// sizeof(variable_monitor_record));
// diag_variant_buffer_write_nolock(&load_monitor_variant_buffer, &vm_record,
// sizeof(variable_monitor_record));
// diag_variant_buffer_seal(&load_monitor_variant_buffer);
// diag_variant_buffer_spin_unlock(&load_monitor_variant_buffer, flags);
do_each_thread(g, p) {
if (p->__state == TASK_RUNNING || __task_contributes_to_load(p) ||
p->__state == TASK_IDLE || 1) {
rcu_read_unlock();
get_task_struct(p);
do_each_thread(g, p) {
if (p->__state == TASK_RUNNING || __task_contributes_to_load(p) ||
p->__state == TASK_IDLE || 1) {
tsk_info.et_type = 1; //! todo event type
tsk_info.id = event_id;
tsk_info.tv = vm_record.tv;
get_task_struct(p);
diag_tsk(p, &tsk_info);
put_task_struct(p);
tsk_info.et_type = 1; //! todo event type
tsk_info.id = event_id;
tsk_info.tv = vm_record.tv;
push_tsk_info(&tsk_info, &flags);
}
diag_tsk(p, &tsk_info);
put_task_struct(p);
push_tskinfo_2_buffer(&tsk_info, &flags);
}
while_each_thread(g, p);
printk("-------------------------------------\n");
}
while_each_thread(g, p);
printk("-------------------------------------\n");
return 0;
}