zhangyang-variable-monitor/source/module/monitor_perf.c

#include "monitor_perf.h"
static struct perf_event *pe;
/**
 * @brief Overflow handler invoked by the perf core for each sample.
 */
void vm_perf_overflow_callback(struct perf_event *event,
                               struct perf_sample_data *data,
                               struct pt_regs *regs) {
  pr_info("perf event callback\n");
  perf_event_disable(event);
  // If the sample carries callchain information, walk the stack entries:
  // struct perf_callchain_entry *callchain;
  // int nr, i;
  // if (data->callchain) {
  //   callchain = data->callchain;
  //   nr = callchain->nr; // number of entries in the callchain
  //   // iterate over the entries and process them
  //   for (i = 0; i < nr; i++) {
  //     // callchain->ip[i] holds one return address of the stack;
  //     // to_buff() could copy it into the ring buffer here, e.g.
  //     // to_buff(&callchain->ip[i], sizeof(callchain->ip[i]));
  //     pr_info("callchain->ip[%d] = %llx\n", i, callchain->ip[i]);
  //   }
  // }
}
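/*
 * Sketch only (not wired into this module): perf overflow handlers run in
 * hard-IRQ or NMI context, so anything heavier than a printk is usually
 * deferred.  The names vm_cb_work / vm_cb_work_fn below are illustrative.
 *
 *	#include <linux/irq_work.h>
 *
 *	static struct irq_work vm_cb_work;
 *
 *	static void vm_cb_work_fn(struct irq_work *work)
 *	{
 *		pr_info("deferred perf sample processing\n");
 *	}
 *
 *	// once, e.g. at module init:
 *	init_irq_work(&vm_cb_work, vm_cb_work_fn);
 *	// from the overflow callback:
 *	irq_work_queue(&vm_cb_work);
 */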
/*
 * Sampling configuration: a software CPU-clock event that overflows after
 * every count and records the callchain with each sample.
 */
struct perf_event_attr pea = {
    .type = PERF_TYPE_SOFTWARE,           // software event
    .size = sizeof(struct perf_event_attr),
    .config = PERF_COUNT_SW_CPU_CLOCK,    // PERF_COUNT_SW_DUMMY was the other candidate
    .sample_period = 1,                   // sample on every event
    .sample_type = PERF_SAMPLE_CALLCHAIN, // include the stack trace in each sample
    // .exclude_kernel = 1,               // skip kernel stack frames | may need to change after testing
    // .exclude_hv = 1,                   // skip hypervisor frames | may need to change after testing
    .disabled = 1,                        // start disabled; enabled explicitly after creation
};
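/*
 * Sketch of an alternative configuration (not used by this module): sample at
 * an approximate frequency instead of a fixed period.  vm_init_freq_attr is
 * an illustrative helper name, not part of the existing interface.
 */
static inline void vm_init_freq_attr(struct perf_event_attr *attr,
                                     u64 samples_per_sec) {
  *attr = (struct perf_event_attr){
      .type = PERF_TYPE_SOFTWARE,
      .size = sizeof(*attr),
      .config = PERF_COUNT_SW_CPU_CLOCK,
      .freq = 1,                      // interpret sample_freq, not sample_period
      .sample_freq = samples_per_sec, // target samples per second
      .sample_type = PERF_SAMPLE_CALLCHAIN,
      .disabled = 1,                  // enabled explicitly after creation
  };
}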
/**
 * @brief Set up a sampling perf event for the given task.
 *
 * @param tsk task to attach the event to
 */
void setup_perf_event_for_task(struct task_struct *tsk) {
  pr_info("setup_perf_event_for_task: cpu = %d\n", tsk->on_cpu);
  if (pe) {
    pr_info("Perf event already created\n");
    return;
  }
  // Alternatives considered: creating one counter per online CPU with
  // for_each_possible_cpu() and a per-CPU event array, or pinning the counter
  // to tsk->on_cpu.  A single task-bound counter with cpu = -1 is used
  // instead, so the event follows the task wherever it runs.
  pe = perf_event_create_kernel_counter(&pea, -1, tsk,
                                        vm_perf_overflow_callback, NULL);
  if (IS_ERR(pe)) {
    pr_err("perf_event_create_kernel_counter failed: %ld\n", PTR_ERR(pe));
    pe = NULL; // do not leave an error pointer behind for cleanup_perf_event()
    return;
  }
  perf_event_enable(pe); // start sampling
}
/**
 * @brief Disable and release the perf event created by
 *        setup_perf_event_for_task(), if any.
 */
void cleanup_perf_event(void) {
  if (pe) {
    perf_event_disable(pe);
    perf_event_release_kernel(pe);
    pe = NULL;
  }
}
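/*
 * Hypothetical wiring (the real module init/exit lives elsewhere in this
 * repository): a loader could attach the counter to the current task on load
 * and tear it down on unload.
 *
 *	static int __init vm_perf_demo_init(void)
 *	{
 *		setup_perf_event_for_task(current);
 *		return 0;
 *	}
 *
 *	static void __exit vm_perf_demo_exit(void)
 *	{
 *		cleanup_perf_event();
 *	}
 *
 *	module_init(vm_perf_demo_init);
 *	module_exit(vm_perf_demo_exit);
 */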