module: code clean
@@ -4,6 +4,7 @@ unsigned long (*diag_kallsyms_lookup_name)(const char *name);
 struct kprobe kprobe_kallsyms_lookup_name = {.symbol_name =
                                                  "kallsyms_lookup_name"};
 
+// orig_fun
 unsigned int (*orig_stack_trace_save_tsk)(struct task_struct *task,
                                           unsigned long *store,
                                           unsigned int size,
@@ -18,7 +19,11 @@ int (*orig_access_remote_vm)(struct mm_struct *mm, unsigned long addr,
                              void *buf, int len, unsigned int gup_flags);
 struct task_struct *(*orig_find_task_by_vpid)(pid_t nr);
 
+/**
+ * @brief diag_kallsyms_lookup_name init
+ *
+ * @return int
+ */
 static int fn_kallsyms_lookup_name_init(void) {
   register_kprobe(&kprobe_kallsyms_lookup_name);
   diag_kallsyms_lookup_name = (void *)kprobe_kallsyms_lookup_name.addr;
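Note: the function above uses the common workaround for kallsyms_lookup_name() no longer being exported on recent kernels: register a kprobe on the symbol and read the resolved address from kprobe.addr. A minimal, self-contained sketch of that same technique (hypothetical standalone module, not code from this commit):

#include <linux/kprobes.h>
#include <linux/module.h>

static struct kprobe kp = { .symbol_name = "kallsyms_lookup_name" };
static unsigned long (*lookup_name)(const char *name);

static int __init lookup_init(void)
{
  int ret = register_kprobe(&kp);   /* registering resolves kp.addr */

  if (ret < 0)
    return ret;
  lookup_name = (void *)kp.addr;    /* address of kallsyms_lookup_name */
  unregister_kprobe(&kp);           /* the probe itself is not needed anymore */
  pr_info("kallsyms_lookup_name at %px\n", lookup_name);
  return 0;
}

static void __exit lookup_exit(void) {}

module_init(lookup_init);
module_exit(lookup_exit);
MODULE_LICENSE("GPL");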
@@ -1,12 +1,12 @@
 #include "monitor_kernel.h"
 
 #include <linux/cdev.h> // for cdev
 #include <linux/device.h>
 #include <linux/fs.h>
 #include <linux/kernel.h>
 #include <linux/mm.h>
 #include <linux/module.h>
 #include <linux/slab.h> // for kmalloc
 
 #define DEVICE_NAME "variable_monitor"
 
@@ -25,7 +25,8 @@ static int device_open(struct inode *inode, struct file *file) {
          current->pid);
   // save pid
   data = kmalloc(sizeof(*data), GFP_KERNEL);
-  if (!data) return -ENOMEM;
+  if (!data)
+    return -ENOMEM;
   data->pid = current->pid;
   file->private_data = data;
   return 0;
@@ -38,12 +39,12 @@ static int device_release(struct inode *inode, struct file *file) {
          data->pid);
   // clear watch with pid
   clear_watch(data->pid);
   kfree(data); // free data memory
   return 0;
 }
 
 typedef struct {
   int pid;
 } ioctl_pid;
 
 static long device_ioctl(struct file *file, unsigned int ioctl_num,
@@ -51,50 +52,46 @@ static long device_ioctl(struct file *file, unsigned int ioctl_num,
   int ret = 0;
   watch_arg warg;
   ioctl_dump_param dump_param;
-  // ioctl_pid wpid;
+  ioctl_pid wpid;
 
   printk(KERN_INFO "variable_monitor fun: %s with ioctl_num %d\n", __FUNCTION__,
          ioctl_num);
 
   switch (ioctl_num) {
     case 0:
       // copy watch_arg
       if (copy_from_user(&warg, (watch_arg *)ioctl_param, sizeof(warg))) {
         return -EACCES;
       }
-      printk(KERN_INFO
-             "Watch_arg: task_id=%d, name=%s, ptr=%p, length_byte=%d, "
+      printk(KERN_INFO "Watch_arg: task_id=%d, name=%s, ptr=%p, length_byte=%d, "
              "time_ns=%ld, threshold=%lld\n",
              warg.task_id, warg.name, warg.ptr, warg.length_byte, warg.time_ns,
              warg.threshold);
       // start watch variable
       start_watch_variable(warg);
       break;
     case 1:
       printk(KERN_INFO "variable_monitor ioctl_num 1\n");
       ret = copy_from_user(&dump_param, (ioctl_dump_param *)ioctl_param,
                            sizeof(ioctl_dump_param));
-      printk(KERN_INFO "dump_param: %p %lu %p\n", dump_param.user_ptr_len, dump_param.user_buf_len, dump_param.user_buf);
+      printk(KERN_INFO "dump_param: %p %lu %p\n", dump_param.user_ptr_len,
+             dump_param.user_buf_len, dump_param.user_buf);
       if (!ret) {
         // printk(KERN_INFO "ret\n");
         ret = copy_to_user_variant_buffer(
             &load_monitor_variant_buffer, dump_param.user_ptr_len,
             dump_param.user_buf, dump_param.user_buf_len);
         printk(KERN_INFO "ret %d, %lu\n", ret, dump_param.user_buf_len);
       }
       printk(KERN_INFO "copy_to_user_variant_buffer \n");
       break;
-    // case 0:
-    // printk(KERN_INFO "variable_monitor test 2\n");
-    // ret = copy_from_user(&wpid, (ioctl_pid *)ioctl_param, sizeof(ioctl_pid));
-    // diag_test(wpid.pid);
-    // /* code */
-    // break;
-    default:
-      // printk(KERN_INFO "variable_monitor test default\n");
-      // ret = copy_from_user(&wpid, (ioctl_pid *)ioctl_param, sizeof(ioctl_pid));
-      // diag_test(wpid.pid);
+    case 2:
+      printk(KERN_INFO "variable_monitor ioctl_num 2\n");
+      ret = copy_from_user(&wpid, (ioctl_pid *)ioctl_param, sizeof(ioctl_pid));
+      diag_test(wpid.pid);
+      break;
+    default:
       break;
   }
   return 0;
 }
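For reference, the ioctl interface touched above (raw command numbers 0/1/2 on the variable_monitor character device) could be driven from user space roughly as follows. This is a hedged sketch only: the device node path and the watch_arg field layout are assumptions inferred from the kernel side of this diff, not part of the commit.

/* user-space sketch: exercise the variable_monitor device */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

struct watch_arg_user {        /* layout assumed from the kernel struct */
  int task_id;
  char name[32];               /* length assumed */
  void *ptr;
  int length_byte;
  long time_ns;
  long long threshold;
};

int main(void)
{
  long watched = 0;
  struct watch_arg_user warg = {
    .task_id = getpid(),
    .ptr = &watched,
    .length_byte = sizeof(watched),
    .time_ns = 1000000,        /* 1 ms sampling period */
    .threshold = 100,
  };
  int fd = open("/dev/variable_monitor", O_RDWR);  /* node name assumed */

  if (fd < 0)
    return 1;
  snprintf(warg.name, sizeof(warg.name), "watched");
  ioctl(fd, 0, &warg);         /* ioctl_num 0: start watching the variable */
  sleep(1);
  close(fd);                   /* release path calls clear_watch(pid) */
  return 0;
}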
@@ -4,14 +4,14 @@
 #include "monitor_trace.h"
 
 extern mm_tree mm_tree_struct;
-extern struct diag_variant_buffer load_monitor_variant_buffer;
+extern struct diag_variant_buffer load_monitor_variant_buffer; // global buffer
 
-int monitor_init(void);
-void monitor_exit(void);
+int monitor_init(void); // monitor init
+void monitor_exit(void); // monitor exit
 
-int start_watch_variable(watch_arg warg);
-void clear_watch(pid_t pid);
+int start_watch_variable(watch_arg warg); // for open
+void clear_watch(pid_t pid); // for release
 
-enum hrtimer_restart check_variable_cb(struct hrtimer *timer); // callback
+enum hrtimer_restart check_variable_cb(struct hrtimer *timer); // hrtimer callback
 
-int diag_test(int nid);
+int diag_test(int nid); // that is for test
@@ -33,6 +33,12 @@ static unsigned char w_arg2k_w_arg(void *kptr, watch_arg warg,
   return 0;
 }
 
+/**
+ * @brief kernel_watch_arg to threshold
+ *
+ * @param k_watch_arg
+ * @param threshold
+ */
 static void k_w_arg2threshold(kernel_watch_arg *k_watch_arg,
                               threshold *threshold) {
   threshold->task_id = k_watch_arg->task_id;
@@ -47,6 +53,12 @@ static void init_mm_tree(mm_tree *mm_tree) {
   spin_lock_init(&mm_tree->mm_tree_lock);
 }
 
+/**
+ * @brief init global variable load_monitor_variant_buffer
+ *
+ * @param buf_size
+ * @return int
+ */
 static int init_buffer(unsigned int buf_size) {
   init_mm_tree(&mm_tree_struct); // init mm_tree
   init_diag_variant_buffer(&load_monitor_variant_buffer, buf_size);
@@ -55,29 +67,32 @@ static int init_buffer(unsigned int buf_size) {
   return ret;
 }
 
+/**
+ * @brief diag task info | brief | user stack | kernel stack | proc chains | raw
+ * stack
+ *
+ * @param p
+ * @param tsk_info
+ */
 static void diag_tsk(struct task_struct *p, variable_monitor_task *tsk_info) {
   unsigned int nr_bt;
   // printk(KERN_INFO "diag_tsk\n");
   diag_task_brief(p, &tsk_info->task); // task brief
-  // printk("1\n");
   diag_task_user_stack(p, &tsk_info->user_stack); // user stack
-  // printk("2\n");
   nr_bt = diag_task_kern_stack(p, &tsk_info->kern_stack); // kernel stack
-  // int i = 0;
-  // printk("pid: %d, kernel stack.stack\n", p->pid);
-  // for (i = 0; i < nr_bt; i++) {
-  // printk("%lx\n", tsk_info->kern_stack.stack[i]);
-  // }
-  // printk("pid: %d, stack_trace_print\n", p->pid);
-  // stack_trace_print(tsk_info->kern_stack.stack, nr_bt, 0); /* print the stack */
-
-  // printk("3\n");
   dump_proc_chains_argv(1, p, &mm_tree_struct,
                         &tsk_info->proc_chains); // proc chains
   diag_task_raw_stack(p, &tsk_info->raw_stack); // raw stack
 }
 
-static void push_tsk_info(variable_monitor_task *tsk_info, unsigned long *flags) {
+/**
+ * @brief push task info to global buffer
+ *
+ * @param tsk_info
+ * @param flags
+ */
+static void push_tskinfo_2_buffer(variable_monitor_task *tsk_info,
+                                  unsigned long *flags) {
   // printk(KERN_INFO "push_tsk_info\n");
   diag_variant_buffer_spin_lock(&load_monitor_variant_buffer, *flags);
   diag_variant_buffer_reserve(&load_monitor_variant_buffer,
@@ -101,74 +116,84 @@ static void clear_all_watch(void) {
   memset(kernel_wtimer_list, 0, sizeof(kernel_wtimer_list));
 }
 
-void sample_task_work(struct work_struct *work){
-  kernel_watch_timer *k_watch_timer = container_of(work, kernel_watch_timer, wk);
-
-  if (k_watch_timer->threshold_num <= 0) return;
-
-  printk(KERN_INFO "sample_task_work\n");
-
-  struct task_struct *g, *p; // g: task group; p: task
-  unsigned long flags;
-  unsigned long event_id = get_cycles();
-
-  static variable_monitor_task tsk_info = {0};
-  static variable_monitor_record vm_record = {0};
-  kernel_watch_arg *kwarg;
-
-  vm_record.id = event_id;
-  vm_record.et_type = 0; //! todo event type
-  vm_record.tv = ktime_get_real();
-  vm_record.threshold_num = k_watch_timer->threshold_num;
-
-  int i;
-  for (i = 0; i < vm_record.threshold_num; i++) {
-    kwarg = &k_watch_timer->k_watch_args[k_watch_timer->threshold_buffer[i]];
-    k_w_arg2threshold(kwarg, &vm_record.threshold_record[i]);
-  }
-  // !todo adjust the output
-  printk(KERN_INFO "超出阈值:%lld\n", vm_record.tv);
-  for (i = 0; i < vm_record.threshold_num; i++) {
-    printk(KERN_INFO "\t: pid: %d, name: %s, ptr: %p, threshold:%lld\n",
-           vm_record.threshold_record[i].task_id,
-           vm_record.threshold_record[i]
-               .name, // Assuming name is a null-terminated string
-           vm_record.threshold_record[i].ptr,
-           vm_record.threshold_record[i].threshold);
-  }
-
-  rcu_read_lock();
-
-  diag_variant_buffer_spin_lock(&load_monitor_variant_buffer, flags);
-  diag_variant_buffer_reserve(&load_monitor_variant_buffer,
-                              sizeof(variable_monitor_record));
-  diag_variant_buffer_write_nolock(&load_monitor_variant_buffer, &vm_record,
-                                   sizeof(variable_monitor_record));
-  diag_variant_buffer_seal(&load_monitor_variant_buffer);
-  diag_variant_buffer_spin_unlock(&load_monitor_variant_buffer, flags);
-
-  rcu_read_unlock();
-
-  do_each_thread(g, p) {
-    if (p->__state == TASK_RUNNING || __task_contributes_to_load(p) ||
-        p->__state == TASK_IDLE || 1) {
-
-      get_task_struct(p);
-
-      tsk_info.et_type = 1; //! todo event type
-      tsk_info.id = event_id;
-      tsk_info.tv = vm_record.tv;
-      diag_tsk(p, &tsk_info);
-
-      put_task_struct(p); // count -1
-
-      push_tsk_info(&tsk_info, &flags); // push to buffer
-    }
-  }
-  while_each_thread(g, p);
-  return ;
+/**
+ * @brief diag task info, for work queue
+ *
+ * @param work
+ */
+void diag_task_info_work(struct work_struct *work) {
+  kernel_watch_timer *k_watch_timer =
+      container_of(work, kernel_watch_timer, wk);
+
+  if (k_watch_timer->threshold_num <= 0) // if no threshold reached
+    return;
+
+  printk(KERN_INFO "diag_task_info_work\n");
+
+  struct task_struct *g, *p; // g: task group; p: task
+  unsigned long flags;
+  unsigned long event_id = get_cycles();
+
+  static variable_monitor_task tsk_info = {0};
+  static variable_monitor_record vm_record = {0};
+  kernel_watch_arg *kwarg;
+
+  vm_record.id = event_id;
+  vm_record.et_type = 0; //! todo event type
+  vm_record.tv = ktime_get_real();
+  vm_record.threshold_num = k_watch_timer->threshold_num;
+
+  int i;
+  for (i = 0; i < vm_record.threshold_num; i++) {
+    kwarg = &k_watch_timer->k_watch_args[k_watch_timer->threshold_buffer[i]];
+    k_w_arg2threshold(kwarg, &vm_record.threshold_record[i]);
+  }
+  // !todo adjust the output
+  printk(KERN_INFO "-------------------------------------\n");
+  printk(KERN_INFO "-----------variable monitor----------\n");
+  printk(KERN_INFO "超出阈值:%lld\n", vm_record.tv);
+
+  for (i = 0; i < vm_record.threshold_num; i++) {
+    printk(KERN_INFO "\t: pid: %d, name: %s, ptr: %p, threshold:%lld\n",
+           vm_record.threshold_record[i].task_id,
+           vm_record.threshold_record[i]
+               .name, // Assuming name is a null-terminated string
+           vm_record.threshold_record[i].ptr,
+           vm_record.threshold_record[i].threshold);
+  }
+
+  rcu_read_lock();
+
+  diag_variant_buffer_spin_lock(&load_monitor_variant_buffer, flags);
+  diag_variant_buffer_reserve(&load_monitor_variant_buffer,
+                              sizeof(variable_monitor_record));
+  diag_variant_buffer_write_nolock(&load_monitor_variant_buffer, &vm_record,
+                                   sizeof(variable_monitor_record));
+  diag_variant_buffer_seal(&load_monitor_variant_buffer);
+  diag_variant_buffer_spin_unlock(&load_monitor_variant_buffer, flags);
+
+  rcu_read_unlock();
+  // for task info
+  do_each_thread(g, p) {
+    if (p->__state == TASK_RUNNING || __task_contributes_to_load(p) ||
+        p->__state == TASK_IDLE || 1) {
+
+      get_task_struct(p); // count +1
+
+      tsk_info.et_type = 1; //! todo event type
+      tsk_info.id = event_id;
+      tsk_info.tv = vm_record.tv;
+      diag_tsk(p, &tsk_info);
+
+      put_task_struct(p); // count -1
+
+      push_tskinfo_2_buffer(&tsk_info, &flags); // push to buffer
+    }
+  }
+  while_each_thread(g, p);
+  printk("-------------------------------------\n");
+  return;
 }
 /**
  * @brief all module function init. orig_X | buffer | workqueue
@@ -184,12 +209,6 @@ int monitor_init(void) {
   ret = init_buffer(50 * 1024 * 1024); // 50M
   if (ret)
     return -1;
-  // init workqueue
-  // int i;
-  // for (i=0; i < MAX_TIMER_NUM; i++) {
-  // kernel_watch_timer *kw_timer = &kernel_wtimer_list[i];
-  // INIT_WORK(&kw_timer->wk, sample_task_work);
-  // }
   return 0;
 }
 
@@ -233,7 +252,7 @@ int start_watch_variable(watch_arg warg) {
   w_arg2k_w_arg(kptr, warg, &k_watch_arg);
   timer = get_timer(warg.time_ns); // get a valuable timer
 
-  INIT_WORK(&timer->wk, sample_task_work);
+  INIT_WORK(&timer->wk, diag_task_info_work);
 
   printk(KERN_INFO "ptr transform kptr: %p\n", kptr);
   printk(KERN_INFO "timer: %p\n", timer);
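The hunk above only re-points the work item at the renamed handler, but the underlying pattern is the usual one: keep the hrtimer callback short and defer the heavy task walk to a workqueue via INIT_WORK/schedule_work. A minimal sketch of that pattern under assumed names (sample_timer, sample_work, a 10 ms period), not code from this module:

#include <linux/hrtimer.h>
#include <linux/module.h>
#include <linux/workqueue.h>

static struct hrtimer sample_timer;
static struct work_struct sample_work;

static void sample_work_fn(struct work_struct *work)
{
  /* heavy lifting (task iteration, buffer writes) runs in process context */
  pr_info("sampling in workqueue context\n");
}

static enum hrtimer_restart sample_timer_cb(struct hrtimer *timer)
{
  schedule_work(&sample_work);                  /* defer out of timer context */
  hrtimer_forward_now(timer, ms_to_ktime(10));  /* re-arm: fire again in 10 ms */
  return HRTIMER_RESTART;
}

static int __init sample_init(void)
{
  INIT_WORK(&sample_work, sample_work_fn);
  hrtimer_init(&sample_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
  sample_timer.function = sample_timer_cb;
  hrtimer_start(&sample_timer, ms_to_ktime(10), HRTIMER_MODE_REL);
  return 0;
}

static void __exit sample_exit(void)
{
  hrtimer_cancel(&sample_timer);
  cancel_work_sync(&sample_work);
}

module_init(sample_init);
module_exit(sample_exit);
MODULE_LICENSE("GPL");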
@@ -262,7 +281,6 @@ void clear_watch(pid_t pid) {
   start_all_hrTimer(); // restart timer
 }
 
-
 /**
  * @brief main callback function
  *
@@ -298,32 +316,7 @@ enum hrtimer_restart check_variable_cb(struct hrtimer *timer) {
   return HRTIMER_RESTART; // restart timer
 }
 
-// static int diag_test(int nid); // for test
-// static void test(struct task_struct *p, variable_monitor_task *tsk_info){
-// // unsigned int nr_bt;
-// printk(KERN_INFO "diag_tsk\n");
-// diag_task_brief(p, &tsk_info->task); // task brief
-// // printk("1\n");
-// diag_task_user_stack(p, &tsk_info->user_stack); // user stack
-// diag_task_kern_stack(p, &tsk_info->kern_stack); // kernel stack
-// dump_proc_chains_argv(1, p, &mm_tree_struct,
-// &tsk_info->proc_chains); // proc chains
-// diag_task_raw_stack(p, &tsk_info->raw_stack); // raw stack
-// printk(KERN_INFO "diag_tsk finish\n");
-// }
-
-// static void test2(variable_monitor_task *tsk_info, unsigned long flags){
-// printk(KERN_INFO "test2\n");
-// diag_variant_buffer_spin_lock(&load_monitor_variant_buffer, flags);
-// diag_variant_buffer_reserve(&load_monitor_variant_buffer,sizeof(variable_monitor_task));
-// diag_variant_buffer_write_nolock(&load_monitor_variant_buffer, tsk_info,
-// sizeof(variable_monitor_task));
-// diag_variant_buffer_seal(&load_monitor_variant_buffer);
-// diag_variant_buffer_spin_unlock(&load_monitor_variant_buffer, flags);
-// printk(KERN_INFO "test2 finish\n");
-// }
-
-int diag_test(int nid){
+int diag_test(int nid) {
   // static struct task_struct *tsk;
   // static struct task_struct *leader;
   // static variable_monitor_task tsk_info;
@@ -331,24 +324,24 @@ int diag_test(int nid){
 
   // int ret;
   // unsigned long flags;
   // pid_t id = (pid_t)nid;
 
   // rcu_read_lock();
   // tsk = NULL;
   // if (orig_find_task_by_vpid)
   // tsk = orig_find_task_by_vpid(id);
   // if (!tsk) {
   // ret = -EINVAL;
   // rcu_read_unlock();
   // return ret;
   // }
 
   // leader = tsk->group_leader;
   // if (leader == NULL || leader->exit_state == EXIT_ZOMBIE){
   // ret = -EINVAL;
   // rcu_read_unlock();
   // return ret;
   // }
 
   // get_task_struct(tsk);
   // rcu_read_unlock();
@@ -371,56 +364,56 @@ int diag_test(int nid){
   // diag_variant_buffer_spin_unlock(&load_monitor_variant_buffer, flags);
   // printk(KERN_INFO "5\n");
 
   struct task_struct *g, *p; // g: task group; p: task
   unsigned long flags;
   unsigned long event_id = get_cycles();
 
   static variable_monitor_task tsk_info = {0};
   static variable_monitor_record vm_record = {0};
 
   // vm_record.id = event_id;
   // vm_record.et_type = 0; //! todo event type
   vm_record.tv = ktime_get_real();
   // vm_record.threshold_num = j;
 
   // printk("-------------------------------------\n");
   // printk("-------------watch monitor-----------\n");
   // printk("Threshold reached:\n");
   // for (i = 0; i < j; i++) {
   // kwarg = &k_watch_timer->k_watch_args[buffer[i]];
   // k_w_arg2threshold(kwarg, &vm_record.threshold_record[i]);
   // }
   // rcu_read_lock();
 
   // diag_variant_buffer_spin_lock(&load_monitor_variant_buffer, flags);
   // diag_variant_buffer_reserve(&load_monitor_variant_buffer,
   // sizeof(variable_monitor_record));
   // diag_variant_buffer_write_nolock(&load_monitor_variant_buffer, &vm_record,
   // sizeof(variable_monitor_record));
   // diag_variant_buffer_seal(&load_monitor_variant_buffer);
   // diag_variant_buffer_spin_unlock(&load_monitor_variant_buffer, flags);
 
   rcu_read_unlock();
 
   do_each_thread(g, p) {
     if (p->__state == TASK_RUNNING || __task_contributes_to_load(p) ||
         p->__state == TASK_IDLE || 1) {
 
       get_task_struct(p);
 
       tsk_info.et_type = 1; //! todo event type
       tsk_info.id = event_id;
       tsk_info.tv = vm_record.tv;
 
       diag_tsk(p, &tsk_info);
 
       put_task_struct(p);
 
-      push_tsk_info(&tsk_info, &flags);
+      push_tskinfo_2_buffer(&tsk_info, &flags);
     }
   }
   while_each_thread(g, p);
   printk("-------------------------------------\n");
 
   return 0;
 }
@@ -1,9 +1,9 @@
 #include "monitor_mem.h"
 
 #include <linux/highmem.h> // for FOLL_FORCE
 #include <linux/sched.h> // pid pid_task
-#include <linux/slab.h> /* for kmalloc */
 #include <linux/sched/mm.h> // get_task_mm
+#include <linux/slab.h> /* for kmalloc */
 
 /// @brief transfer user space address to kernel space address
 /// change static global "kaddr" and "page" value
@@ -4,10 +4,10 @@
 // #include <linux/sched.h>
 
 typedef struct {
   int __user *user_ptr_len;
   size_t __user user_buf_len;
   void __user *user_buf;
-} ioctl_dump_param;
+} ioctl_dump_param; // for ioctl_num 1;
 
 typedef struct {
   pid_t task_id; // current process id
@@ -46,6 +46,13 @@ static inline int orig_diag_cgroup_name(struct cgroup *cgrp, char *buf,
   }
 }
 
+/**
+ * @brief find mm_info by mm
+ *
+ * @param mm_tree
+ * @param mm
+ * @return mm_info*
+ */
 static inline mm_info *find_mm_info(mm_tree *mm_tree, struct mm_struct *mm) {
   mm_info *info;
   if (mm == NULL)
@@ -76,6 +83,13 @@ static void diag_cgroup_name(struct task_struct *tsk, char *buf,
   __diag_cgroup_name(tsk, buf, count, cgroup);
 }
 
+/**
+ * @brief copy stack frame by fp
+ *
+ * @param fp
+ * @param frame
+ * @return int
+ */
 static int copy_stack_frame(const void __user *fp,
                             struct stack_frame_user *frame) {
   int ret;
@@ -97,17 +111,23 @@ static int copy_stack_frame_remote(struct task_struct *tsk,
 
   mm = get_task_mm(tsk);
   if (!mm) {
-    printk("copy_stack_frame_remote %d get_task_mm fail\n", tsk->pid);
+    printk(KERN_INFO "copy_stack_frame_remote %d get_task_mm fail\n", tsk->pid);
     return 0;
   }
 
   ret = orig_access_remote_vm(mm, (unsigned long)fp, frame, sizeof(*frame), 0);
-  // printk("copy_stack_frame_remote %d ret:%d\n", tsk->pid, ret);
+  // printk(KERN_INFO "copy_stack_frame_remote %d ret:%d\n", tsk->pid, ret);
   mmput(mm);
 
   return ret;
 }
 
+/**
+ * @brief save stack trace | not current task
+ *
+ * @param tsk
+ * @param trace
+ */
 static inline void save_stack_trace_user_remote(struct task_struct *tsk,
                                                 struct stack_trace *trace) {
   const struct pt_regs *regs = task_pt_regs(tsk);
@@ -115,12 +135,14 @@ static inline void save_stack_trace_user_remote(struct task_struct *tsk,
   int count = 0;
 
   if (in_atomic()) {
-    printk("save_stack_trace_user_remote %d in_atomic\n", tsk->pid);
+    printk(KERN_INFO "save_stack_trace_user_remote %d: task in_atomic\n",
+           tsk->pid);
     return;
   }
 
   if (irqs_disabled()) {
-    printk("save_stack_trace_user_remote %d irqs_disabled\n", tsk->pid);
+    printk(KERN_INFO "save_stack_trace_user_remote %d: task in irqs_disabled\n",
+           tsk->pid);
     return;
   }
 
@@ -134,30 +156,34 @@ static inline void save_stack_trace_user_remote(struct task_struct *tsk,
     frame.ret_addr = 0;
 
     if (!copy_stack_frame_remote(tsk, fp, &frame)) {
-      // printk("save_stack_trace_user_remote %d copy_stack_frame_remote fail\n",
+      // printk(KERN_INFO "save_stack_trace_user_remote %d
+      // copy_stack_frame_remote fail\n",
      // tsk->pid);
      break;
    }
 
    if ((unsigned long)fp < regs->sp) {
-      // printk("save_stack_trace_user_remote %d fp < sp count:%d\n", tsk->pid,
+      // printk(KERN_INFO "save_stack_trace_user_remote %d fp < sp count:%d\n",
+      // tsk->pid,
      // count);
      break; // if fp is below sp, we have reached the bottom of the stack; stop
    }
    // if the return address is non-zero, this is a valid stack frame; save the return address
    if (frame.ret_addr) {
      trace->entries[trace->nr_entries++] = frame.ret_addr;
-      // printk("save_stack_trace_user_remote %d ret_addr:%lx\n", tsk->pid,
-      // frame.ret_addr);
+      // printk(KERN_INFO "save_stack_trace_user_remote %d ret_addr:%lx\n",
+      // tsk->pid,
+      // frame.ret_addr);
    } else {
-      // printk("save_stack_trace_user_remote %d no ret_addr", tsk->pid);
+      // printk(KERN_INFO "save_stack_trace_user_remote %d no ret_addr",
+      // tsk->pid);
      break;
-      // continue;
    }
 
    // if fp points to itself, we have reached the bottom of the stack; stop
    if (fp == frame.next_fp) {
-      // printk("save_stack_trace_user_remote %d fp == next_fp", tsk->pid);
+      // printk(KERN_INFO "save_stack_trace_user_remote %d fp == next_fp",
+      // tsk->pid);
      break;
    }
    fp = frame.next_fp; // otherwise, keep walking down the frames
@@ -214,6 +240,11 @@ static void perfect_save_stack_trace_user(struct stack_trace *trace) {
     trace->entries[trace->nr_entries++] = ULONG_MAX;
 }
 
+/**
+ * @brief save stack trace | current task
+ *
+ * @param backtrace
+ */
 static void diagnose_save_stack_trace_user(unsigned long *backtrace) {
   struct stack_trace trace;
 
@@ -224,6 +255,13 @@ static void diagnose_save_stack_trace_user(unsigned long *backtrace) {
   perfect_save_stack_trace_user(&trace);
 }
 
+/**
+ * @brief save stack trace | not current task
+ *
+ * @param tsk
+ * @param backtrace
+ */
+
 static void diagnose_save_stack_trace_user_remote(struct task_struct *tsk,
                                                   unsigned long *backtrace) {
   struct stack_trace trace;
@@ -237,18 +275,11 @@ static void diagnose_save_stack_trace_user_remote(struct task_struct *tsk,
    * Trace user stack if we are not a kernel thread
    */
   if (tsk->mm) {
-    // printk("save_stack_trace_user_remote %d mm\n", tsk->pid);
+    // printk(KERN_INFO "save_stack_trace_user_remote %d mm\n", tsk->pid);
     save_stack_trace_user_remote(tsk, &trace);
   }
   if (trace.nr_entries < trace.max_entries)
     trace.entries[trace.nr_entries++] = ULONG_MAX;
-
-  // printk("save_stack_trace_user_remote %d, stack: [", tsk->pid);
-  // int i = 0;
-  // for (i = 0; i < BACKTRACE_DEPTH; i++) {
-  // printk("%lx, ", backtrace[i]);
-  // }
-  // printk("]\n");
 }
 
 static int diagnose_task_raw_stack_remote(struct task_struct *tsk, void *to,
@@ -257,12 +288,12 @@ static int diagnose_task_raw_stack_remote(struct task_struct *tsk, void *to,
   struct mm_struct *mm;
 
   if (in_atomic()) {
-    printk("task_raw_stack_remote %d in_atomic\n", tsk->pid);
+    printk(KERN_INFO "task_raw_stack_remote %d in_atomic\n", tsk->pid);
     return 0;
   }
 
   if (irqs_disabled()) {
-    printk("task_raw_stack_remote %d irqs_disabled\n", tsk->pid);
+    printk(KERN_INFO "task_raw_stack_remote %d irqs_disabled\n", tsk->pid);
     return 0;
   }
 
@@ -277,7 +308,8 @@ static int diagnose_task_raw_stack_remote(struct task_struct *tsk, void *to,
   ret = orig_access_remote_vm(mm, (unsigned long)from, to, n, 0);
   mmput(mm);
 
-  // printk("task_raw_stack_remote %d access_remote_vm ret: %d\n", tsk->pid, ret);
+  // printk(KERN_INFO "task_raw_stack_remote %d access_remote_vm ret: %d\n",
+  // tsk->pid, ret);
 
   return ret < 0 ? ret : 0;
 }
@@ -384,19 +416,34 @@ void diag_task_user_stack(struct task_struct *tsk, user_stack_detail *detail) {
   detail->bp = bp;
 
   if (tsk == current) {
-    // printk("diag_task_user_stack %d current\n", tsk->pid);
+    // printk(KERN_INFO "diag_task_user_stack %d current\n", tsk->pid);
     diagnose_save_stack_trace_user(detail->stack);
   } else {
-    // printk("diag_task_user_stack %d no current\n", tsk->pid);
+    // printk(KERN_INFO "diag_task_user_stack %d no current\n", tsk->pid);
     diagnose_save_stack_trace_user_remote(tsk, detail->stack);
   }
 }
 
+/**
+ * @brief diag task kernel stack | -> to orig_stack_trace_save_tsk
+ *
+ * @param tsk
+ * @param detail
+ * @return unsigned int
+ */
 unsigned int diag_task_kern_stack(struct task_struct *tsk,
                                   kern_stack_detail *detail) {
   return orig_stack_trace_save_tsk(tsk, detail->stack, BACKTRACE_DEPTH, 0);
 }
 
+/**
+ * @brief diag task proc chains
+ *
+ * @param style
+ * @param tsk
+ * @param mm_tree
+ * @param detail
+ */
 void dump_proc_chains_argv(int style, struct task_struct *tsk, mm_tree *mm_tree,
                            proc_chains_detail *detail) {
   struct task_struct *walker;
@@ -438,11 +485,6 @@ void dump_proc_chains_argv(int style, struct task_struct *tsk, mm_tree *mm_tree,
       detail->full_argv[cnt] = 0;
     }
     detail->tgid[cnt] = walker->pid;
-    // if ((detail->tgid[cnt] != 0) | (detail->full_argv[cnt] != 0)) {
-    // printk("pid: %d,full_argv: %d, chains: %s, cnt:%d\n",
-    // detail->tgid[cnt],
-    // detail->full_argv[cnt], detail->chains[cnt], cnt);
-    // }
     walker = rcu_dereference(walker->real_parent);
     cnt++;
     if (cnt >= PROCESS_CHAINS_COUNT)
@@ -453,9 +495,9 @@ void dump_proc_chains_argv(int style, struct task_struct *tsk, mm_tree *mm_tree,
 
 /**
  * @brief copy task raw stack
  *
  * @param tsk
  * @param detail
  */
 void diag_task_raw_stack(struct task_struct *tsk, raw_stack_detail *detail) {
   struct pt_regs *regs;
@@ -493,7 +535,8 @@ void diag_task_raw_stack(struct task_struct *tsk, raw_stack_detail *detail) {
       ret = diagnose_task_raw_stack_remote(
           tsk, stack, (void __user *)sp + detail->stack_size, 1024);
     }
-    // printk("diag_task_raw_stack %d i:%d ret:%d\n", tsk->pid, i, ret);
+    // printk(KERN_INFO "diag_task_raw_stack %d i:%d ret:%d\n", tsk->pid, i,
+    // ret);
     if (ret)
       break;
     else
@@ -501,71 +544,4 @@ void diag_task_raw_stack(struct task_struct *tsk, raw_stack_detail *detail) {
 
     stack += 1024;
   }
 }
-
-/// @brief print all task stack
-/// @param
-// static void print_task_stack(void) {
-// struct task_struct *g, *p; // g: task group; p: task
-// unsigned long backtrace[BACKTRACE_DEPTH]; // save stack
-// unsigned int nr_bt; // stack depth
-// unsigned long long current_time; // last time
-// current_time = ktime_get_real();
-// printk("Timestamp (ns): %lld\n", current_time);
-// printk("Recent Load: %lu.%02lu, %lu.%02lu, %lu.%02lu\n", // recent load
-// LOAD_INT(avenrun[0]), LOAD_FRAC(avenrun[0]), LOAD_INT(avenrun[1]),
-// LOAD_FRAC(avenrun[1]), LOAD_INT(avenrun[2]), LOAD_FRAC(avenrun[2]));
-// rcu_read_lock(); // lock run queue
-// // printk("Running task\n");
-// do_each_thread(g, p) {
-// if (p->__state == TASK_RUNNING || __task_contributes_to_load(p) ||
-// p->__state == TASK_IDLE) {
-// printk("task: %s, pid %d, state %d\n", p->comm, p->pid,
-// p->__state); //! todo
-// nr_bt = orig_stack_trace_save_tsk(p, backtrace, BACKTRACE_DEPTH, 0);
-// stack_trace_print(backtrace, nr_bt, 0); // print
-// }
-// }
-// while_each_thread(g, p);
-// rcu_read_unlock(); // unlock run queue
-// }
-
-// void diag_printf_kern_stack(kern_stack_detail *kern_stack, int reverse) {
-// int i;
-// symbol sym;
-
-// printf(" 内核态堆栈:\n");
-// if (reverse) {
-// for (i = BACKTRACE_DEPTH - 1; i >= 0; i--) {
-// if (kern_stack->stack[i] == (size_t)-1 || kern_stack->stack[i] == 0) {
-// continue;
-// }
-// sym.reset(kern_stack->stack[i]);
-// if (g_symbol_parser.find_kernel_symbol(sym)) {
-// printf("#@ 0x%lx %s ([kernel.kallsyms])\n",
-// kern_stack->stack[i],
-// sym.name.c_str());
-// } else {
-// printf("#@ 0x%lx %s\n", kern_stack->stack[i], "UNKNOWN");
-// }
-// }
-// } else {
-// for (i = 0; i < BACKTRACE_DEPTH; i++) {
-// if (kern_stack->stack[i] == (size_t)-1 || kern_stack->stack[i] == 0) {
-// break;
-// }
-// sym.reset(kern_stack->stack[i]);
-// if (g_symbol_parser.find_kernel_symbol(sym)) {
-// printf("#@ 0x%lx %s ([kernel.kallsyms])\n",
-// kern_stack->stack[i],
-// sym.name.c_str());
-// } else {
-// printf("#@ 0x%lx %s\n", kern_stack->stack[i], "UNKNOWN");
-// }
-// }
-// }
-// }
-
-// void diag_printf_kern_stack(struct diag_kern_stack_detail *kern_stack) {
-// diag_printf_kern_stack(kern_stack, 0);
-// }
@@ -56,11 +56,11 @@ typedef struct {
   */
   unsigned long user_mode;
   char comm[TASK_COMM_LEN];
-} task_detail;
+} task_detail; // task brief
 
 typedef struct {
   unsigned long stack[BACKTRACE_DEPTH];
-} kern_stack_detail;
+} kern_stack_detail; // kernel stack
 
 typedef struct {
   struct pt_regs regs;
@@ -68,15 +68,15 @@ typedef struct {
   unsigned long bp;
   unsigned long sp;
   unsigned long stack[BACKTRACE_DEPTH];
-} user_stack_detail;
+} user_stack_detail; // user stack
 
 typedef struct {
   struct pt_regs regs;
   unsigned long ip;
   unsigned long bp;
   unsigned long sp;
   unsigned long stack_size;
   unsigned long stack[DIAG_USER_STACK_SIZE / sizeof(unsigned long)];
 } raw_stack_detail;
 
 typedef struct {
@@ -95,7 +95,7 @@ typedef struct {
   kern_stack_detail kern_stack; // kernel stack
   proc_chains_detail proc_chains; // process chains argv
   raw_stack_detail raw_stack;
-} variable_monitor_task;
+} variable_monitor_task; // main struct
 
 typedef struct {
   struct radix_tree_root mm_tree;
@@ -109,14 +109,15 @@ void diag_task_brief(struct task_struct *tsk,
                      task_detail *detail); // get task brief
 void diag_task_user_stack(struct task_struct *tsk,
                           user_stack_detail *detail); // get task user stack
-void diag_task_raw_stack(struct task_struct *tsk, raw_stack_detail *detail); // get task raw stack
-unsigned int diag_task_kern_stack(struct task_struct *tsk,
-                                  kern_stack_detail *detail); // get task kernel stack
+void diag_task_raw_stack(struct task_struct *tsk,
+                         raw_stack_detail *detail); // get task raw stack
+unsigned int
+diag_task_kern_stack(struct task_struct *tsk,
+                     kern_stack_detail *detail); // get task kernel stack
 void dump_proc_chains_argv(
     int style, struct task_struct *tsk, mm_tree *mm_tree,
     proc_chains_detail *detail); // get process chains argv
 
 // print
 // void diag_printf_kern_stack(kern_stack_detail *kern_stack);
 // void diag_printf_kern_stack(kern_stack_detail *kern_stack, int reverse);