BTF is my name, Typing my game

Patrick Pichler
March 06, 2025

Join me on a technical deep dive to understand the inner workings of BTF.
Transcript

  1. $ sudo bpftool map dump name data
     [{ "key": 3,  "value": { "comm": "bash" } },
      { "key": 17, "value": { "comm": "cat" } }]
  2. [Diagram: kernel source files (core_sched.c, bpf.c, slab.c, tcp.c, <...>.c) each
     compile with debug info into the Linux kernel image; the combined debug info ends
     up in the .debug_info section of the ELF binary and weighs in at roughly 120 MB.]
  3. From: Alexei Starovoitov <[email protected]>
     To: Sandipan Das <[email protected]>, <[email protected]>
     Subject: Re: [RFC PATCH] bpf: Add helpers to read useful task_struct members
     Date: Sat, 4 Nov 2017 18:34:27 +0900
     Message-ID: <[email protected]>

     I don't think it's a solution. Tracing scripts read other fields too. Making it
     work for these 3 fields is a drop in a bucket. If randomization is used I think
     we have to accept that existing bpf scripts won't be usable. Long term solution
     is to support 'BPF Type Format' or BTF (which is old C-Type Format) for kernel
     data structures, ... There will be a tool that will take dwarf from vmlinux and
     compress it into BTF. Kernel will also be able to verify that BTF is a valid
     BTF. ...

     LKML thread
  5. #define BTF_MAGIC   0xeB9F
     #define BTF_VERSION 1

     struct btf_header {
         __u16 magic;
         __u8  version;
         __u8  flags;
         __u32 hdr_len;

         /* All offsets are in bytes relative to the end of this header */
         __u32 type_off; /* offset of type section */
         __u32 type_len; /* length of type section */
         __u32 str_off;  /* offset of string section */
         __u32 str_len;  /* length of string section */
     };
  10. [Diagram: the BTF strings section starts at base address 0x00 + header.hdr_len +
      header.str_off. It is a sequence of NUL-terminated strings (\0, __ARRAY_SIZE_TYPE__\0,
      u32\0, __u32\0, unsigned int\0, int\0, data\0, comm\0, char\0, ...) at data offsets
      0x00, 0x01, 0x05, 0x19, 0x1D, 0x23, 0x30, 0x35, 0x3A, ...]
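The header is everything a reader needs to locate the two sections. A minimal sketch in C, assuming a raw BTF blob already loaded into memory (the function name is illustrative):

    #include <linux/btf.h>
    #include <stddef.h>
    #include <stdio.h>

    /* Validate the header and locate the type and string sections. */
    static int btf_locate_sections(const void *raw, size_t size)
    {
        const struct btf_header *hdr = raw;

        if (size < sizeof(*hdr) || hdr->magic != BTF_MAGIC)
            return -1; /* not a BTF blob */

        /* Both offsets are relative to the end of the header. */
        const void *types = (const char *)raw + hdr->hdr_len + hdr->type_off;
        const void *strs  = (const char *)raw + hdr->hdr_len + hdr->str_off;

        printf("BTF v%u: %u bytes of types, %u bytes of strings\n",
               hdr->version, hdr->type_len, hdr->str_len);
        (void)types; (void)strs;
        return 0;
    }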
  12. [Diagram: the BTF types section starts at base address 0x00 + header.hdr_len +
      header.type_off. It is a packed sequence of entries (BTF types 2-6 here, at data
      offsets 0x00, 0x0C, 0x18, 0x54, 0x60, 0x78, 0x84); each entry is a struct btf_type,
      optionally followed by a kind-specific appendix such as struct btf_member,
      struct btf_enum or struct btf_param.]
  15. [Diagram: each entry in the types section is a struct btf_type { u32 name_off;
      u32 info; union { u32 size; u32 type; }; }. Its name_off (0x30 here) is an offset
      into the strings section, resolving to the string "data".]
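Resolving a name_off is just pointer arithmetic into the strings section. A small illustrative helper, assuming the same in-memory blob as above (btf_str is not a real API name):

    #include <linux/btf.h>

    /* Return the NUL-terminated string a name_off points at, or NULL. */
    static const char *btf_str(const void *raw, __u32 name_off)
    {
        const struct btf_header *hdr = raw;

        if (name_off >= hdr->str_len)
            return NULL;
        /* Offset 0 is the empty string, used by anonymous types. */
        return (const char *)raw + hdr->hdr_len + hdr->str_off + name_off;
    }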
  16. Bit layout of u32 info:
      bits  0-15: vlen
      bits 16-23: unused
      bits 24-28: kind
      bits 29-30: unused
      bit     31: kind_flag
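The UAPI header ships macros for exactly this bit layout; a short sketch of unpacking info with them (the open-coded shifts show the same thing):

    #include <linux/btf.h>

    static void unpack_info(const struct btf_type *t)
    {
        __u16 vlen      = BTF_INFO_VLEN(t->info);  /* bits 0-15  */
        __u8  kind      = BTF_INFO_KIND(t->info);  /* bits 24-28 */
        __u8  kind_flag = BTF_INFO_KFLAG(t->info); /* bit 31     */

        /* Equivalent open-coded form: */
        __u16 vlen2 = t->info & 0xffff;
        __u8  kind2 = (t->info >> 24) & 0x1f;

        (void)vlen; (void)kind; (void)kind_flag; (void)vlen2; (void)kind2;
    }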
  23. enum {
          BTF_KIND_INT        = 1,  /* Integer */
          BTF_KIND_PTR        = 2,  /* Pointer */
          BTF_KIND_ARRAY      = 3,  /* Array */
          BTF_KIND_STRUCT     = 4,  /* Struct */
          BTF_KIND_UNION      = 5,  /* Union */
          BTF_KIND_ENUM       = 6,  /* Enumeration up to 32-bit values */
          BTF_KIND_FWD        = 7,  /* Forward */
          BTF_KIND_TYPEDEF    = 8,  /* Typedef */
          BTF_KIND_VOLATILE   = 9,  /* Volatile */
          BTF_KIND_CONST      = 10, /* Const */
          BTF_KIND_RESTRICT   = 11, /* Restrict */
          BTF_KIND_FUNC       = 12, /* Function */
          BTF_KIND_FUNC_PROTO = 13, /* Function Proto */
          BTF_KIND_VAR        = 14, /* Variable */
          BTF_KIND_DATASEC    = 15, /* Section */
          BTF_KIND_FLOAT      = 16, /* Floating point */
          BTF_KIND_DECL_TAG   = 17, /* Decl Tag */
          BTF_KIND_TYPE_TAG   = 18, /* Type Tag */
          BTF_KIND_ENUM64     = 19, /* Enumeration up to 64-bit values */
      };
  28. [Diagram: the types section of the example holds IDs 1-5 (int, data + appendix,
      info + appendix, char, int *). The highlighted entry is a BTF_KIND_PTR: its
      struct btf_type has name_off = 0x00, info = { vlen: 0x00, kind: 0x02
      (BTF_KIND_PTR), kind_flag: 0x00 } and type = 0x01, i.e. it points at type ID 1
      ("int").]
  34. enum {
          BTF_KIND_PTR      = 2,  /* Pointer */
          BTF_KIND_FWD      = 7,  /* Forward */
          BTF_KIND_TYPEDEF  = 8,  /* Typedef */
          BTF_KIND_VOLATILE = 9,  /* Volatile */
          BTF_KIND_CONST    = 10, /* Const */
          BTF_KIND_RESTRICT = 11, /* Restrict */
          BTF_KIND_FUNC     = 12, /* Function */
          BTF_KIND_FLOAT    = 16, /* Floating point */
      };
  35. [Diagram: the highlighted entry is a BTF_KIND_STRUCT: its struct btf_type has
      name_off = 0x30, info = { vlen: 0x02, kind: 0x04 (BTF_KIND_STRUCT),
      kind_flag: 0x00 } and size = 0x10. It is followed by a struct btf_member[]
      appendix with one { name_off, type, offset } record per member:
      { 0x30, 0x02, 0x00 } and { 0x35, 0x01, 0x00 }.]
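Putting the appendix to use: for a BTF_KIND_STRUCT (or UNION), the btf_member array sits directly after the fixed-size struct btf_type and has vlen entries. A sketch, reusing the illustrative btf_str() helper from earlier:

    #include <linux/btf.h>
    #include <stdio.h>

    static void dump_struct(const void *raw, const struct btf_type *t)
    {
        const struct btf_member *m = (const struct btf_member *)(t + 1);
        __u16 vlen = BTF_INFO_VLEN(t->info);

        printf("struct %s (%u bytes)\n", btf_str(raw, t->name_off), t->size);
        for (__u16 i = 0; i < vlen; i++, m++)
            printf("  %-16s -> type id %u, bit offset %u\n",
                   btf_str(raw, m->name_off), m->type, m->offset);
    }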
  42. Sections:
      Idx Name                                  Size     VMA              Type
        0                                       00000000 0000000000000000
        1 .strtab                               000000f0 0000000000000000
        2 .text                                 00000000 0000000000000000 TEXT
        3 tracepoint/raw_syscalls/sys_enter     000001e0 0000000000000000 TEXT
        4 .reltracepoint/raw_syscalls/sys_enter 00000030 0000000000000000
        5 license                               0000000d 0000000000000000 DATA
        6 .rodata                               00000014 0000000000000000 DATA
        7 .maps                                 00000030 0000000000000000 DATA
        8 .bss                                  00000008 0000000000000000 BSS
        9 .BTF                                  0000f1e0 0000000000000000
       10 .rel.BTF                              00000060 0000000000000000
       11 .BTF.ext                              000001cc 0000000000000000
       12 .rel.BTF.ext                          00000190 0000000000000000
       13 .llvm_addrsig                         00000006 0000000000000000
       14 .symtab                               00000108 0000000000000000
  43. [Diagram: the same compilation flow, but pahole consumes the roughly 120 MB of
      kernel debug info produced for the Linux kernel (core_sched.c, bpf.c, slab.c,
      tcp.c, <...>.c) and emits compact BTF definitions.]
  44. /* CU #1: */
      struct S;
      struct A { int a; struct A* self; struct S* parent; };
      struct B;
      struct S { struct A* a_ptr; struct B* b_ptr; };

      /* CU #2: */
      struct S;
      struct A;
      struct B { int b; struct B* self; struct S* parent; };
      struct S { struct A* a_ptr; struct B* b_ptr; };
  45. [Diagram: the BTF type graphs produced per compilation unit before deduplication.
      CU #1: ptr(1), struct 'S'(2) { struct A* a_ptr; struct B* b_ptr; }, ptr(3),
      struct 'A'(4) { int a; struct A* self; struct S* parent; }, int 'int'(5), ptr(6),
      fwd 'B'(7). CU #2: ptr(8), struct 'S'(9) { struct A* a_ptr; struct B* b_ptr; },
      ptr(10), fwd 'A'(11), ptr(12), struct 'B'(13) { int b; struct B* self;
      struct S* parent; }, int 'int'(14).]
  50. [Diagram: after deduplication a single graph remains: ptr(1), struct 'S'(2)
      { struct A* a_ptr; struct B* b_ptr; }, ptr(3), struct 'A'(4) { int a;
      struct A* self; struct S* parent; }, int 'int'(5), ptr(6), struct 'B'(7)
      { int b; struct B* self; struct S* parent; }.]
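This merge is BTF deduplication. In the kernel build it is pahole that reads the DWARF out of vmlinux and emits the deduplicated .BTF section; libbpf exposes the same algorithm, so a rough sketch (assuming the libbpf v1.x API) looks like this:

    #include <bpf/btf.h>
    #include <stdio.h>

    int main(int argc, char **argv)
    {
        if (argc < 2)
            return 1;

        /* Parse the .BTF section out of an ELF object. */
        struct btf *btf = btf__parse_elf(argv[1], NULL);
        if (!btf)
            return 1;

        printf("before dedup: %u types\n", btf__type_cnt(btf));
        if (btf__dedup(btf, NULL))      /* default options */
            return 1;
        printf("after dedup:  %u types\n", btf__type_cnt(btf));

        btf__free(btf);
        return 0;
    }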
  51. $ sudo bpftool map dump name data
      [{ "key": 3,  "value": { "comm": "bash" } },
       { "key": 17, "value": { "comm": "cat" } }]
  52. int bpf(int cmd, union bpf_attr *attr, unsigned int size);

      enum bpf_cmd {
          BPF_MAP_CREATE,
          BPF_MAP_LOOKUP_ELEM,
          BPF_MAP_UPDATE_ELEM,
          BPF_MAP_DELETE_ELEM,
          BPF_MAP_GET_NEXT_KEY,
          // ...
      };
  54. union bpf_attr {
          struct { /* anonymous struct used by BPF_MAP_CREATE command */
              __u32 map_type;
              __u32 key_size;
              __u32 value_size;
              __u32 max_entries;
              // ...
              char  map_name[BPF_OBJ_NAME_LEN];
              __u32 btf_fd;
              __u32 btf_key_type_id;
              __u32 btf_value_type_id;
              // ...
          };
          // ...
      };
  57. enum bpf_cmd {
          BPF_MAP_CREATE,
          // ...
          BPF_PROG_GET_NEXT_ID,
          BPF_MAP_GET_NEXT_ID,
          BPF_PROG_GET_FD_BY_ID,
          BPF_MAP_GET_FD_BY_ID,
          BPF_OBJ_GET_INFO_BY_FD,
          // ...
          BPF_BTF_LOAD,
          BPF_BTF_GET_FD_BY_ID,
          // ...
          BPF_BTF_GET_NEXT_ID,
          // ...
      };
  58. union bpf_attr {
          // ...
          struct { /* anonymous struct for BPF_BTF_LOAD */
              __aligned_u64 btf;
              __aligned_u64 btf_log_buf;
              __u32 btf_size;
              __u32 btf_log_size;
              __u32 btf_log_level;
              __u32 btf_log_true_size;
              __u32 btf_flags;
              // ...
          };
          // ...
      };
  60. void *btf_spec = load_btf_spec(); // However this is done

      int btf_fd = bpf(BPF_BTF_LOAD, { .btf = btf_spec, }, /*..*/);

      int map_fd = bpf(BPF_MAP_CREATE, {
          .btf_fd = btf_fd,
          .key_id = 4,
          .value_id = 6,
      }, /*..*/);
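Spelled out with the raw syscall, the two calls look roughly like this; the blob, its size and the two type IDs (4 for the key type, 6 for the value type) are assumed to come from the compiled object, and the kernel field names are btf_key_type_id/btf_value_type_id rather than the shortened key_id/value_id above:

    #include <linux/bpf.h>
    #include <string.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    static long sys_bpf(enum bpf_cmd cmd, union bpf_attr *attr, unsigned int size)
    {
        return syscall(__NR_bpf, cmd, attr, size);
    }

    static int create_map_with_btf(void *btf_spec, __u32 btf_spec_size)
    {
        union bpf_attr attr;
        int btf_fd;

        /* Load the BTF blob; the kernel validates it and hands back an fd. */
        memset(&attr, 0, sizeof(attr));
        attr.btf = (__u64)(unsigned long)btf_spec;
        attr.btf_size = btf_spec_size;
        btf_fd = sys_bpf(BPF_BTF_LOAD, &attr, sizeof(attr));
        if (btf_fd < 0)
            return -1;

        /* Create a map whose key/value types point into that BTF. */
        memset(&attr, 0, sizeof(attr));
        attr.map_type = BPF_MAP_TYPE_HASH;
        attr.key_size = 4;                 /* illustrative sizes */
        attr.value_size = 16;
        attr.max_entries = 1024;
        attr.btf_fd = btf_fd;
        attr.btf_key_type_id = 4;
        attr.btf_value_type_id = 6;
        strncpy(attr.map_name, "data", sizeof(attr.map_name) - 1);
        return sys_bpf(BPF_MAP_CREATE, &attr, sizeof(attr));
    }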
  64. union bpf_attr {
          // ...
          struct { /* anonymous struct used by BPF_OBJ_GET_INFO_BY_FD */
              __u32 bpf_fd;
              __u32 info_len;
              __aligned_u64 info;
          } info;
          // ...
      };
  68. struct bpf_map_info {
          __u32 type;
          __u32 id;
          __u32 key_size;
          __u32 value_size;
          __u32 max_entries;
          __u32 map_flags;
          char  name[BPF_OBJ_NAME_LEN];
          // ...
          __u32 btf_id;
          __u32 btf_key_type_id;
          __u32 btf_value_type_id;
          // ...
      } __attribute__((aligned(8)));
  72. struct bpf_btf_info {
          __aligned_u64 btf;
          __u32 btf_size;
          __u32 id;
          __aligned_u64 name;
          __u32 name_len;
          __u32 kernel_btf;
      } __attribute__((aligned(8)));
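This is how tools such as bpftool get the raw blob back out of a running kernel: turn an id into an fd, ask once for the size, then ask again with a buffer. A sketch using the raw syscall:

    #include <linux/bpf.h>
    #include <stdlib.h>
    #include <string.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    static long sys_bpf(enum bpf_cmd cmd, union bpf_attr *attr, unsigned int size)
    {
        return syscall(__NR_bpf, cmd, attr, size);
    }

    static void *fetch_btf(__u32 btf_id, __u32 *size_out)
    {
        union bpf_attr attr;
        struct bpf_btf_info info;
        __u32 blob_size = 0;
        void *buf = NULL;
        int btf_fd;

        memset(&attr, 0, sizeof(attr));
        attr.btf_id = btf_id;
        btf_fd = sys_bpf(BPF_BTF_GET_FD_BY_ID, &attr, sizeof(attr));
        if (btf_fd < 0)
            return NULL;

        /* First call: info.btf is NULL, the kernel only fills in btf_size. */
        memset(&info, 0, sizeof(info));
        memset(&attr, 0, sizeof(attr));
        attr.info.bpf_fd = btf_fd;
        attr.info.info_len = sizeof(info);
        attr.info.info = (__u64)(unsigned long)&info;
        if (sys_bpf(BPF_OBJ_GET_INFO_BY_FD, &attr, sizeof(attr)) == 0) {
            blob_size = info.btf_size;
            buf = malloc(blob_size);
        }

        /* Second call: point info.btf at our buffer to copy the blob out. */
        if (buf) {
            memset(&info, 0, sizeof(info));
            info.btf = (__u64)(unsigned long)buf;
            info.btf_size = blob_size;
            if (sys_bpf(BPF_OBJ_GET_INFO_BY_FD, &attr, sizeof(attr)) == 0) {
                *size_out = info.btf_size;
                close(btf_fd);
                return buf;
            }
            free(buf);
        }

        close(btf_fd);
        return NULL;
    }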
  74. // ...
      struct task_struct *t = (void *)bpf_get_current_task();
      u64 start_time = 0;
      bpf_core_read(&start_time, sizeof(start_time), &t->start_time);
      // ...
  75. // ...
      struct task_struct *t = (void *)bpf_get_current_task();
      u64 start_time = 0;
      bpf_probe_read_kernel(&start_time, sizeof(start_time), &t->start_time);
      // ...

      call bpf_get_current_task
      r1 = r0
      r1 <<= 32
      r1 s>>= 32
      *(u64 *)(r10 - 16) = r1
      r1 = 0
      *(u64 *)(r10 - 32) = r1
      *(u64 *)(r10 - 24) = r1
      r3 = *(u64 *)(r10 - 16)
      r3 += 1808
      r1 = r10
      r1 += -24
      r2 = 8
      call bpf_probe_read_kernel
  82. [Diagram: struct task_struct layout. Offset 0x00: struct thread_info thread_info;
      0x18: unsigned int state; ... 0x710: u64 start_time; 0x718: u64 start_boottime; ...]
  84. // ...
      struct task_struct *t = (void *)bpf_get_current_task();
      u64 start_time = 0;
      bpf_core_read(&start_time, sizeof(start_time), &t->start_time);
      // ...

      call bpf_get_current_task
      r1 = r0
      r1 <<= 32
      r1 s>>= 32
      *(u64 *)(r10 - 16) = r1
      r1 = 0
      *(u64 *)(r10 - 32) = r1
      *(u64 *)(r10 - 24) = r1
      r3 = *(u64 *)(r10 - 16)
      r3 += 1808
      r1 = r10
      r1 += -24
      r2 = 8
      call bpf_probe_read_kernel
  85. Sections:
      Idx Name                                  Size     VMA              Type
        0                                       00000000 0000000000000000
        1 .strtab                               000000f0 0000000000000000
        2 .text                                 00000000 0000000000000000 TEXT
        3 tracepoint/raw_syscalls/sys_enter     000001e0 0000000000000000 TEXT
        4 .reltracepoint/raw_syscalls/sys_enter 00000030 0000000000000000
        5 license                               0000000d 0000000000000000 DATA
        6 .rodata                               00000014 0000000000000000 DATA
        7 .maps                                 00000030 0000000000000000 DATA
        8 .bss                                  00000008 0000000000000000 BSS
        9 .BTF                                  0000f1e0 0000000000000000
       10 .rel.BTF                              00000060 0000000000000000
       11 .BTF.ext                              000001cc 0000000000000000
       12 .rel.BTF.ext                          00000190 0000000000000000
       13 .llvm_addrsig                         00000006 0000000000000000
       14 .symtab                               00000108 0000000000000000
  87. struct btf_ext_header {
          __u16 magic;
          __u8  version;
          __u8  flags;
          __u32 hdr_len;

          /* All offsets are in bytes relative to the end of this header */
          __u32 func_info_off;
          __u32 func_info_len;
          __u32 line_info_off;
          __u32 line_info_len;

          /* optional part of .BTF.ext header */
          __u32 core_relo_off;
          __u32 core_relo_len;
      };
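A sketch of walking that header, assuming a raw .BTF.ext blob in memory; the struct mirrors the slide (in the kernel tree it lives in libbpf, not in the UAPI headers), and the magic is the same 0xeB9F as for .BTF:

    #include <linux/btf.h>
    #include <linux/types.h>
    #include <stddef.h>
    #include <stdio.h>

    struct btf_ext_header {
        __u16 magic;
        __u8  version;
        __u8  flags;
        __u32 hdr_len;
        /* All offsets are in bytes relative to the end of this header */
        __u32 func_info_off;
        __u32 func_info_len;
        __u32 line_info_off;
        __u32 line_info_len;
        /* optional part of .BTF.ext header */
        __u32 core_relo_off;
        __u32 core_relo_len;
    };

    static int parse_btf_ext(const void *raw, size_t size)
    {
        const struct btf_ext_header *hdr = raw;

        if (size < offsetof(struct btf_ext_header, core_relo_off) ||
            hdr->magic != BTF_MAGIC)
            return -1;

        printf("func_info: %u bytes, line_info: %u bytes\n",
               hdr->func_info_len, hdr->line_info_len);

        /* The CO-RE part only exists if the header is long enough. */
        if (hdr->hdr_len >= sizeof(*hdr))
            printf("core_relo: %u bytes at +%u\n",
                   hdr->core_relo_len, hdr->core_relo_off);
        return 0;
    }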
  92. struct btf_ext_info_sec {
          __u32 sec_name_off;
          __u32 num_info;
          /* Followed by num_info * record_size number of bytes */
          __u8 data[];
      };
  96. struct bpf_func_info {
          // offset where the function starts
          __u32 insn_off;
          // KIND_FUNC BTF type that describes the function
          __u32 type_id;
      };
  99. struct bpf_line_info {
          // offset of the instruction this info is for
          __u32 insn_off;
          // offset of the file name in the string section
          __u32 file_name_off;
          // offset of the source line in the string section
          __u32 line_off;
          // line/column information of line in source file
          __u32 line_col;
      };
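line_col packs both numbers into one word; the UAPI header provides BPF_LINE_INFO_LINE_NUM() and BPF_LINE_INFO_LINE_COL() to pull them apart:

    #include <linux/bpf.h>
    #include <stdio.h>

    static void print_line_info(const struct bpf_line_info *li)
    {
        printf("insn %u -> line %u, col %u\n",
               li->insn_off,
               BPF_LINE_INFO_LINE_NUM(li->line_col),  /* line_col >> 10   */
               BPF_LINE_INFO_LINE_COL(li->line_col)); /* line_col & 0x3ff */
    }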
  104. struct bpf_core_relo {
           // offset of the instruction to patch
           __u32 insn_off;
           // target type of the relocation
           __u32 type_id;
           // relocation access string (points to string in strings section)
           __u32 access_str_off;
           // kind of relocation (see `enum bpf_core_relo_kind`)
           enum bpf_core_relo_kind kind;
       };
  106. enum bpf_core_relo_kind {
           BPF_CORE_FIELD_BYTE_OFFSET = 0,  /* field byte offset */
           BPF_CORE_FIELD_BYTE_SIZE   = 1,  /* field size in bytes */
           BPF_CORE_FIELD_EXISTS      = 2,  /* field existence in target kernel */
           BPF_CORE_FIELD_SIGNED      = 3,  /* field signedness (0 - unsigned, 1 - signed) */
           BPF_CORE_FIELD_LSHIFT_U64  = 4,  /* bitfield-specific left bitshift */
           BPF_CORE_FIELD_RSHIFT_U64  = 5,  /* bitfield-specific right bitshift */
           BPF_CORE_TYPE_ID_LOCAL     = 6,  /* type ID in local BPF object */
           BPF_CORE_TYPE_ID_TARGET    = 7,  /* type ID in target kernel */
           BPF_CORE_TYPE_EXISTS       = 8,  /* type existence in target kernel */
           BPF_CORE_TYPE_SIZE         = 9,  /* type size in bytes */
           BPF_CORE_ENUMVAL_EXISTS    = 10, /* enum value existence in target kernel */
           BPF_CORE_ENUMVAL_VALUE     = 11, /* enum value integer value */
           BPF_CORE_TYPE_MATCHES      = 12, /* type match in target kernel */
       };

       Check out on Bootlin
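The access string is a ':'-separated list of indices: the first indexes the base pointer (usually 0), each following one picks a member of a struct/union or an element of an array while walking into the type. An illustrative parser (the example string and indices are made up):

    #include <stdio.h>
    #include <stdlib.h>

    /* Split "0:2:1" into { 0, 2, 1 }; returns the number of indices. */
    static int parse_access_str(const char *spec, int *idx, int max)
    {
        int n = 0;
        char *end;

        while (*spec && n < max) {
            idx[n++] = (int)strtol(spec, &end, 10);
            if (end == spec)
                return -1; /* not a number */
            spec = (*end == ':') ? end + 1 : end;
        }
        return n;
    }

    int main(void)
    {
        int idx[16];
        int n = parse_access_str("0:2:1", idx, 16);

        for (int i = 0; i < n; i++)
            printf("step %d: index %d\n", i, idx[i]);
        return 0;
    }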
  112. struct sample {
           int a;
           int b;
           int c;
       } __attribute__((preserve_access_index));

       struct sample *s;
  113. Load Kernel BTF (vmlinux) → Load Binary BTF → Parse .BTF.ext →
       Iterate over all relocations → Find matching types by name in Kernel BTF →
       Calculate field offset → Patch relocation
  120. /* < Kernel 5.14 */
       struct task_struct {
           struct thread_info thread_info;
           unsigned int state;
           void *stack;
           refcount_t usage;
           unsigned int flags;
           // ...
       };

       /* >= Kernel 5.14 */
       struct task_struct {
           struct thread_info thread_info;
           unsigned int __state;
           void *stack;
           refcount_t usage;
           unsigned int flags;
           // ...
       };
  122. struct task_struct *t = (void *)bpf_get_current_task();
       u32 state;

       if (bpf_core_field_exists(t->__state)) {
           state = BPF_CORE_READ(t, __state);
       } else {
           struct task_struct___before514 *t_old = (void *)t;
           state = BPF_CORE_READ(t_old, state);
       }
  127. // On kernel < 5.14
       struct task_struct *t = (void *)bpf_get_current_task();
       u32 state;

       // Loader will patch this to return false
       if (bpf_core_field_exists(t->__state)) {
           // Offset to read will be patched to 0xbad2310
           state = BPF_CORE_READ(t, __state);
       } else {
           struct task_struct___before514 *t_old = (void *)t;
           state = BPF_CORE_READ(t_old, state);
       }
  132. // On kernel >= 5.14
       struct task_struct *t = (void *)bpf_get_current_task();
       u32 state;

       // Loader will patch this to return true
       if (bpf_core_field_exists(t->__state)) {
           state = BPF_CORE_READ(t, __state);
       } else {
           struct task_struct___before514 *t_old = (void *)t;
           // Offset to read will be patched to 0xbad2310
           state = BPF_CORE_READ(t_old, state);
       }
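The task_struct___before514 type used above is declared locally in the BPF program. libbpf treats everything after "___" as a flavor suffix and strips it when matching against kernel BTF, so a minimal stub like the sketch below (only the fields the program actually reads need to be listed) is enough; the name itself is the author's choice:

    struct task_struct___before514 {
        unsigned int state;
    } __attribute__((preserve_access_index));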