add workqueue: defer per-task sampling from the hrtimer callback to system_highpri_wq

zy
2023-11-23 18:12:57 +08:00
parent 316174b71e
commit 1df11cd6e4
3 changed files with 92 additions and 78 deletions
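The change below moves the heavy per-task sampling out of the hrtimer callback and into a work item queued on the kernel's system_highpri_wq, so the timer callback only detects threshold crossings and re-arms itself. The sketch that follows is not part of the commit; it is a minimal, self-contained illustration of that hrtimer-plus-workqueue pattern, and every demo_* name in it is invented for the example.

// Minimal sketch (illustrative only): an hrtimer that defers its heavy work
// to a work item on system_highpri_wq, the pattern this commit adopts.
#include <linux/hrtimer.h>
#include <linux/ktime.h>
#include <linux/module.h>
#include <linux/workqueue.h>

struct demo_timer {
  struct hrtimer hr_timer;
  struct work_struct wk;
  ktime_t interval;
};

static struct demo_timer demo;

static void demo_work_fn(struct work_struct *work)
{
  struct demo_timer *d = container_of(work, struct demo_timer, wk);

  /* heavy, sleepable sampling would go here (process context) */
  pr_info("demo: deferred sampling for timer %p\n", &d->hr_timer);
}

static enum hrtimer_restart demo_timer_cb(struct hrtimer *timer)
{
  struct demo_timer *d = container_of(timer, struct demo_timer, hr_timer);

  /* keep the callback short: hand the work off and re-arm the timer */
  queue_work(system_highpri_wq, &d->wk);
  hrtimer_forward_now(timer, d->interval);
  return HRTIMER_RESTART;
}

static int __init demo_init(void)
{
  INIT_WORK(&demo.wk, demo_work_fn);
  demo.interval = ktime_set(1, 0); /* 1 s */
  hrtimer_init(&demo.hr_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
  demo.hr_timer.function = demo_timer_cb;
  hrtimer_start(&demo.hr_timer, demo.interval, HRTIMER_MODE_REL);
  return 0;
}

static void __exit demo_exit(void)
{
  hrtimer_cancel(&demo.hr_timer);
  cancel_work_sync(&demo.wk);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");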


@@ -101,8 +101,61 @@ static void clear_all_watch(void) {
  memset(kernel_wtimer_list, 0, sizeof(kernel_wtimer_list));
}
static void sample_task_work(struct work_struct *work) {
  kernel_watch_timer *k_watch_timer =
      container_of(work, kernel_watch_timer, wk);
  struct task_struct *g, *p; // g: task group; p: task
  unsigned long flags;
  unsigned long event_id = get_cycles();
  static variable_monitor_task tsk_info = {0};
  static variable_monitor_record vm_record = {0};
  kernel_watch_arg *kwarg;
  if (k_watch_timer->threshold_num <= 0)
    return;
  vm_record.id = event_id;
  vm_record.et_type = 0; //! todo event type
  vm_record.tv = ktime_get_real();
  vm_record.threshold_num = k_watch_timer->threshold_num;
  // copy the threshold records captured by the hrtimer callback
  for (int i = 0; i < vm_record.threshold_num; i++) {
    kwarg = &k_watch_timer->k_watch_args[k_watch_timer->threshold_buffer[i]];
    k_w_arg2threshold(kwarg, &vm_record.threshold_record[i]);
  }
  rcu_read_lock();
  diag_variant_buffer_spin_lock(&load_monitor_variant_buffer, flags);
  diag_variant_buffer_reserve(&load_monitor_variant_buffer,
                              sizeof(variable_monitor_record));
  diag_variant_buffer_write_nolock(&load_monitor_variant_buffer, &vm_record,
                                   sizeof(variable_monitor_record));
  diag_variant_buffer_seal(&load_monitor_variant_buffer);
  diag_variant_buffer_spin_unlock(&load_monitor_variant_buffer, flags);
  rcu_read_unlock();
  rcu_read_lock(); // the task-list walk needs an RCU read-side section
  do_each_thread(g, p) {
    // note: the trailing "|| 1" currently makes this record every task
    if (p->__state == TASK_RUNNING || __task_contributes_to_load(p) ||
        p->__state == TASK_IDLE || 1) {
      get_task_struct(p); // count +1
      tsk_info.et_type = 1; //! todo event type
      tsk_info.id = event_id;
      tsk_info.tv = vm_record.tv;
      diag_tsk(p, &tsk_info);
      put_task_struct(p); // count -1
      push_tsk_info(&tsk_info, &flags); // push to buffer
    }
  }
  while_each_thread(g, p);
  rcu_read_unlock();
}
/**
 * @brief all module function init. orig_X | buffer
 * @brief all module function init. orig_X | buffer | workqueue
 *
 * @return int
 */
@@ -115,6 +168,11 @@ int monitor_init(void) {
  ret = init_buffer(50 * 1024 * 1024); // 50M
  if (ret)
    return -1;
  // init the per-timer work items used to defer sampling off the hrtimer path
  for (int i = 0; i < MAX_TIMER_NUM; i++) {
    kernel_watch_timer *kw_timer = &kernel_wtimer_list[i];
    INIT_WORK(&kw_timer->wk, sample_task_work);
  }
  return 0;
}
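A hedged side note, not part of this commit: once per-timer work items exist, the module's teardown path should also make sure no queued sample_task_work is still pending when the timers and the variant buffer are released. A minimal sketch under that assumption, reusing the existing kernel_wtimer_list and MAX_TIMER_NUM and an invented helper name:

// Sketch only: cancel any still-queued sample_task_work on module teardown,
// so a late work item cannot touch freed timers or buffers.
static void monitor_cancel_sample_work(void)
{
  for (int i = 0; i < MAX_TIMER_NUM; i++)
    cancel_work_sync(&kernel_wtimer_list[i].wk);
}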
@@ -185,6 +243,7 @@ void clear_watch(pid_t pid) {
  start_all_hrTimer(); // restart timer
}
/**
 * @brief main callback function
 *
@@ -195,7 +254,6 @@ enum hrtimer_restart check_variable_cb(struct hrtimer *timer) {
  kernel_watch_timer *k_watch_timer =
      container_of(timer, kernel_watch_timer, hr_timer);
  int i = 0, j = 0;
  int buffer[TIMER_MAX_WATCH_NUM]; // indices of the watch args that crossed their thresholds
  kernel_watch_arg *kwarg;
  // check all watched kernel_watch_arg
@@ -203,66 +261,17 @@ enum hrtimer_restart check_variable_cb(struct hrtimer *timer) {
    kwarg = &k_watch_timer->k_watch_args[i];
    if (read_and_compare(kwarg->kptr, kwarg->length_byte, kwarg->greater_flag,
                         kwarg->unsigned_flag, kwarg->threshold)) {
      buffer[j] = i;
      k_watch_timer->threshold_buffer[j] = i;
      j++;
    }
  }
  if (j > 0) // if any threshold reached
  {
    struct task_struct *g, *p; // g: task group; p: task
    unsigned long flags;
    unsigned long event_id = get_cycles();
    static variable_monitor_task tsk_info = {0};
    static variable_monitor_record vm_record = {0};
    vm_record.id = event_id;
    vm_record.et_type = 0; //! todo event type
    vm_record.tv = ktime_get_real();
    vm_record.threshold_num = j;
    // printk("-------------------------------------\n");
    // printk("-------------watch monitor-----------\n");
    // printk("Threshold reached:\n");
    for (i = 0; i < j; i++) {
      kwarg = &k_watch_timer->k_watch_args[buffer[i]];
      k_w_arg2threshold(kwarg, &vm_record.threshold_record[i]);
    }
    rcu_read_lock();
    diag_variant_buffer_spin_lock(&load_monitor_variant_buffer, flags);
    diag_variant_buffer_reserve(&load_monitor_variant_buffer,
                                sizeof(variable_monitor_record));
    diag_variant_buffer_write_nolock(&load_monitor_variant_buffer, &vm_record,
                                     sizeof(variable_monitor_record));
    diag_variant_buffer_seal(&load_monitor_variant_buffer);
    diag_variant_buffer_spin_unlock(&load_monitor_variant_buffer, flags);
    rcu_read_unlock();
    do_each_thread(g, p) {
      if (p->__state == TASK_RUNNING || __task_contributes_to_load(p) ||
          p->__state == TASK_IDLE || 1) {
        get_task_struct(p);
        tsk_info.et_type = 1; //! todo event type
        tsk_info.id = event_id;
        tsk_info.tv = vm_record.tv;
        diag_tsk(p, &tsk_info);
        put_task_struct(p);
        push_tsk_info(&tsk_info, &flags);
      }
    }
    while_each_thread(g, p);
    // print_task_stack();
    k_watch_timer->threshold_num = j;
    // restart timer after 5s
    hrtimer_forward(timer, timer->base->get_time(), ktime_set(5, 0)); //! todo
    printk("-------------------------------------\n");
    // defer the heavy task sampling to the system high-priority workqueue
    queue_work(system_highpri_wq, &k_watch_timer->wk);
  } else {
    // keep the configured sampling frequency
    hrtimer_forward(timer, timer->base->get_time(), k_watch_timer->kt);