// SPDX-License-Identifier: GPL-2.0
#include "util/cgroup.h"
#include "util/debug.h"
#include "util/evlist.h"
#include "util/hashmap.h"
#include "util/machine.h"
#include "util/map.h"
#include "util/symbol.h"
#include "util/target.h"
#include "util/thread.h"
#include "util/thread_map.h"
#include "util/lock-contention.h"
#include <linux/zalloc.h>
#include <linux/string.h>
#include <api/fs/fs.h>
#include <bpf/bpf.h>
#include <bpf/btf.h>
#include <errno.h>
#include <inttypes.h>
#include <stdlib.h>
#include <unistd.h>

#include "bpf_skel/lock_contention.skel.h"
#include "bpf_skel/lock_data.h"

static struct lock_contention_bpf *skel;
static bool has_slab_iter;
static struct hashmap slab_hash;
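
/*
 * slab_hash maps a kmem_cache id to its slab_cache_data entry.  The id is
 * already unique, so it is used directly as the hash value and compared
 * for equality below.
 */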
static size_t slab_cache_hash(long key, void *ctx __maybe_unused)
{
return key;
}

static bool slab_cache_equal(long key1, long key2, void *ctx __maybe_unused)
{
return key1 == key2;
}
static void check_slab_cache_iter(struct lock_contention *con)
{
	s32 ret;

	hashmap__init(&slab_hash, slab_cache_hash, slab_cache_equal, /*ctx=*/NULL);

con->btf = btf__load_vmlinux_btf();
if (con->btf == NULL) {
pr_debug("BTF loading failed: %s\n", strerror(errno));
return;
	}

	ret = btf__find_by_name_kind(con->btf, "bpf_iter__kmem_cache", BTF_KIND_STRUCT);
if (ret < 0) {
bpf_program__set_autoload(skel->progs.slab_cache_iter, false);
pr_debug("slab cache iterator is not available: %d\n", ret);
return;
}

	has_slab_iter = true;
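
	/* Size the cache id map with the configured number of entries */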
bpf_map__set_max_entries(skel->maps.slab_caches, con->map_nr_entries);
}
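
/*
 * Reading from the iterator fd runs the BPF program once over all
 * kmem_caches, filling the slab_caches map.  Then snapshot that map into
 * slab_hash for lookups on the userspace side.
 */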
static void run_slab_cache_iter(void)
{
int fd;
char buf[256];
long key, *prev_key;

	if (!has_slab_iter)
return;
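
	/* Get an iterator fd from the attached bpf_iter link */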
fd = bpf_iter_create(bpf_link__fd(skel->links.slab_cache_iter));
if (fd < 0) {
pr_debug("cannot create slab cache iter: %d\n", fd);
return;
	}

	/* This will run the bpf program */
while (read(fd, buf, sizeof(buf)) > 0)
continue;
	close(fd);

	/* Read the slab cache map and build a hash with IDs */
fd = bpf_map__fd(skel->maps.slab_caches);
prev_key = NULL;
	while (!bpf_map_get_next_key(fd, prev_key, &key)) {
		struct slab_cache_data *data;

		data = malloc(sizeof(*data));
		if (data == NULL)
			break;

		if (bpf_map_lookup_elem(fd, &key, data) < 0) {
			free(data);
			break;
		}

		/* Entries are owned by slab_hash once added; free on failure */
		if (hashmap__add(&slab_hash, data->id, data) < 0)
			free(data);

		prev_key = &key;
	}
}
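
/* Free the entries collected by run_slab_cache_iter() */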
static void exit_slab_cache_iter(void)
{
struct hashmap_entry *cur;
	size_t bkt;

	hashmap__for_each_entry(&slab_hash, cur, bkt)
		free(cur->pvalue);

	hashmap__clear(&slab_hash);
}
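
/*
 * Resolve what the BPF program needs to walk per-node zone data: the size
 * of struct zone, the address of contig_page_data (UMA) or node_data[]
 * (NUMA), and the number of online nodes.
 */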
static void init_numa_data(struct lock_contention *con)
{
struct symbol *sym;
struct map *kmap;
char *buf = NULL, *p;
size_t len;
long last = -1;
	int ret;

	/* Bail out if BTF is not available (see check_slab_cache_iter) */
	if (con->btf == NULL)
		return;

	/*
	 * 'struct zone' is embedded in 'struct pglist_data' as an array.
	 * The (fake) vmlinux.h may not have the full definition of
	 * struct zone, so get its actual size from BTF.
	 */
ret = btf__find_by_name_kind(con->btf, "zone", BTF_KIND_STRUCT);
if (ret < 0) {
pr_debug("cannot get type of struct zone: %d\n", ret);
return;
	}

	ret = btf__resolve_size(con->btf, ret);
if (ret < 0) {
pr_debug("cannot get size of struct zone: %d\n", ret);
return;
	}

	skel->rodata->sizeof_zone = ret;

	/* UMA system doesn't have 'node_data[]' - just use contig_page_data. */
sym = machine__find_kernel_symbol_by_name(con->machine,
"contig_page_data",
&kmap);
if (sym) {
skel->rodata->contig_page_data_addr = map__unmap_ip(kmap, sym->start);
map__put(kmap);
return;
	}

	/*
	 * 'node_data' is an array of pointers to struct pglist_data.
	 * The BPF program needs to follow the pointer for each node to
	 * reach its struct pglist_data and the zones inside.
	 */
sym = machine__find_kernel_symbol_by_name(con->machine,
"node_data",
&kmap);
	if (sym == NULL)
		return;

	skel->rodata->node_data_addr = map__unmap_ip(kmap, sym->start);
map__put(kmap);

	/* get the number of online nodes using the last node number + 1 */
ret = sysfs__read_str("devices/system/node/online", &buf, &len);
if (ret < 0) {
pr_debug("failed to read online node: %d\n", ret);
return;
}
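
	/*
	 * The sysfs file lists node ranges like "0-3" or "0,2"; scan the
	 * numbers and remember the last (largest) one.
	 */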
p = buf;
while (p && *p) {
last = strtol(p, &p, 0);
if (p && (*p == ',' || *p == '-' || *p == '\n'))
p++;
}

	skel->rodata->nr_nodes = last + 1;
	free(buf);
}