Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account-related emails.

Already on GitHub? Sign in to your account

Add eBPF C code linter #314

Merged
merged 1 commit into from
Apr 12, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
4 changes: 3 additions & 1 deletion Makefile
Original file line number Diff line number Diff line change
Expand Up @@ -112,8 +112,10 @@ fmt: ## Run go fmt against code.

.PHONY: lint
lint: prereqs ## Lint the code
@echo "### Linting code"
@echo "### Linting golang code"
golangci-lint run ./... --timeout=3m
@echo "### Linting bpf C code"
find ./bpf -type f -not -path "./bpf/headers/*" -name "*.[ch]" | xargs clang-format --dry-run --Werror

.PHONY: gen-bpf
gen-bpf: export BPF_CLANG := $(CLANG)
Expand Down
56 changes: 28 additions & 28 deletions bpf/dns_tracker.h
Original file line number Diff line number Diff line change
Expand Up @@ -6,10 +6,10 @@
#define __DNS_TRACKER_H__
#include "utils.h"

#define DNS_PORT 53
#define DNS_QR_FLAG 0x8000
#define UDP_MAXMSG 512
#define EINVAL 22
#define DNS_PORT 53
#define DNS_QR_FLAG 0x8000
#define UDP_MAXMSG 512
#define EINVAL 22

struct dns_header {
u16 id;
Expand All @@ -20,7 +20,7 @@ struct dns_header {
u16 arcount;
};

static inline void fill_dns_id (flow_id *id, dns_flow_id *dns_flow, u16 dns_id, bool reverse) {
static inline void fill_dns_id(flow_id *id, dns_flow_id *dns_flow, u16 dns_id, bool reverse) {
dns_flow->id = dns_id;
dns_flow->protocol = id->transport_protocol;
if (reverse) {
Expand All @@ -39,28 +39,28 @@ static inline void fill_dns_id (flow_id *id, dns_flow_id *dns_flow, u16 dns_id,
static __always_inline u8 calc_dns_header_offset(pkt_info *pkt, void *data_end) {
u8 len = 0;
switch (pkt->id->transport_protocol) {
case IPPROTO_TCP: {
struct tcphdr *tcp = (struct tcphdr *) pkt->l4_hdr;
if (!tcp || ((void *)tcp + sizeof(*tcp) > data_end)) {
return 0;
}
len = tcp->doff * sizeof(u32) + 2; // DNS over TCP has 2 bytes of length at the beginning
break;
case IPPROTO_TCP: {
struct tcphdr *tcp = (struct tcphdr *)pkt->l4_hdr;
if (!tcp || ((void *)tcp + sizeof(*tcp) > data_end)) {
return 0;
}
case IPPROTO_UDP: {
struct udphdr *udp = (struct udphdr *) pkt->l4_hdr;
if (!udp || ((void *)udp + sizeof(*udp) > data_end)) {
return 0;
}
len = bpf_ntohs(udp->len);
// make sure udp payload doesn't exceed max msg size
if (len - sizeof(struct udphdr) > UDP_MAXMSG) {
return 0;
}
// set the length to udp hdr size as it will be used to locate dns header
len = sizeof(struct udphdr);
break;
len = tcp->doff * sizeof(u32) + 2; // DNS over TCP has 2 bytes of length at the beginning
break;
}
case IPPROTO_UDP: {
struct udphdr *udp = (struct udphdr *)pkt->l4_hdr;
if (!udp || ((void *)udp + sizeof(*udp) > data_end)) {
return 0;
}
len = bpf_ntohs(udp->len);
// make sure udp payload doesn't exceed max msg size
if (len - sizeof(struct udphdr) > UDP_MAXMSG) {
return 0;
}
// set the length to udp hdr size as it will be used to locate dns header
len = sizeof(struct udphdr);
break;
}
}
return len;
}
Expand Down Expand Up @@ -95,14 +95,14 @@ static __always_inline int track_dns_packet(struct __sk_buff *skb, pkt_info *pkt
} else { /* dns response */
fill_dns_id(pkt->id, &dns_req, dns_id, true);
u64 *value = bpf_map_lookup_elem(&dns_flows, &dns_req);
if (value != NULL) {
if (value != NULL) {
pkt->dns_latency = ts - *value;
pkt->dns_id = dns_id;
pkt->dns_flags = flags;
bpf_map_delete_elem(&dns_flows, &dns_req);
}
}
} // end of dns response
} // end of dns port check
} // end of dns port check
return 0;
}

Expand Down
10 changes: 5 additions & 5 deletions bpf/flows.c
Original file line number Diff line number Diff line change
Expand Up @@ -72,9 +72,9 @@ static inline int flow_monitor(struct __sk_buff *skb, u8 direction) {

// check if this packet need to be filtered if filtering feature is enabled
if (enable_flows_filtering) {
u32* filter_counter_p = NULL;
u32 *filter_counter_p = NULL;
u32 initVal = 1, key = 0;
if (is_flow_filtered(&id, &action) != 0 && action != MAX_FILTER_ACTIONS) {
if (is_flow_filtered(&id, &action) != 0 && action != MAX_FILTER_ACTIONS) {
// we have matching rules follow through the actions to decide if we should accept or reject the flow
// and update global counter for both cases
u32 reject_key = FILTER_FLOWS_REJECT_KEY, accept_key = FILTER_FLOWS_ACCEPT_KEY;
Expand Down Expand Up @@ -116,7 +116,7 @@ static inline int flow_monitor(struct __sk_buff *skb, u8 direction) {
if (action == ACCEPT || action == MAX_FILTER_ACTIONS) {
return TC_ACT_OK;
} else {
// we have reject rule and no match so we can add the flows to the hashmap table.
// we have reject rule and no match so we can add the flows to the hashmap table.
}
}
}
Expand Down Expand Up @@ -197,7 +197,8 @@ static inline int flow_monitor(struct __sk_buff *skb, u8 direction) {
}

new_flow.errno = -ret;
flow_record *record = (flow_record *)bpf_ringbuf_reserve(&direct_flows, sizeof(flow_record), 0);
flow_record *record =
(flow_record *)bpf_ringbuf_reserve(&direct_flows, sizeof(flow_record), 0);
if (!record) {
if (trace_messages) {
bpf_printk("couldn't reserve space in the ringbuf. Dropping flow");
Expand All @@ -223,4 +224,3 @@ int egress_flow_parse(struct __sk_buff *skb) {
}

char _license[] SEC("license") = "GPL";

16 changes: 9 additions & 7 deletions bpf/flows_filter.h
Original file line number Diff line number Diff line change
Expand Up @@ -32,11 +32,11 @@ static __always_inline int is_equal_ip(u8 *ip1, u8 *ip2, u8 len) {
return 0;
}
}
return 1;
return 1;
}

static __always_inline int do_flow_filter_lookup(flow_id *id, struct filter_key_t *key, filter_action *action,
u8 len, u8 offset) {
static __always_inline int do_flow_filter_lookup(flow_id *id, struct filter_key_t *key,
filter_action *action, u8 len, u8 offset) {
int result = 0;

struct filter_value_t *rule = (struct filter_value_t *)bpf_map_lookup_elem(&filter_map, key);
Expand Down Expand Up @@ -68,7 +68,8 @@ static __always_inline int do_flow_filter_lookup(flow_id *id, struct filter_key_
goto end;
}
} else if (rule->dstPortStart != 0 && rule->dstPortEnd != 0) {
if (rule->dstPortStart <= id->dst_port && id->dst_port <= rule->dstPortEnd) {
if (rule->dstPortStart <= id->dst_port &&
id->dst_port <= rule->dstPortEnd) {
BPF_PRINTK("dstPortStart and dstPortEnd matched\n");
result++;
} else {
Expand All @@ -86,7 +87,8 @@ static __always_inline int do_flow_filter_lookup(flow_id *id, struct filter_key_
goto end;
}
} else if (rule->srcPortStart != 0 && rule->srcPortEnd != 0) {
if (rule->srcPortStart <= id->src_port && id->src_port <= rule->srcPortEnd) {
if (rule->srcPortStart <= id->src_port &&
id->src_port <= rule->srcPortEnd) {
BPF_PRINTK("srcPortStart and srcPortEnd matched\n");
result++;
} else {
Expand Down Expand Up @@ -137,8 +139,8 @@ static __always_inline int do_flow_filter_lookup(flow_id *id, struct filter_key_
break;
}
} else {
result = 0;
goto end;
result = 0;
goto end;
}
}

Expand Down
6 changes: 3 additions & 3 deletions bpf/maps_definition.h
Original file line number Diff line number Diff line change
Expand Up @@ -21,16 +21,16 @@ struct {
//PerfEvent Array for Packet Payloads
struct {
__uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
__type(key, u32);
__type(value, u32);
__type(key, u32);
__type(value, u32);
__uint(max_entries, 256);
} packet_record SEC(".maps");

// DNS tracking flow based hashmap used to correlate query and responses
// to allow calculating latency in ebpf agent directly
struct {
__uint(type, BPF_MAP_TYPE_HASH);
__uint(max_entries, 1 << 20); // Will take around 64MB of space.
__uint(max_entries, 1 << 20); // Will take around 64MB of space.
__type(key, dns_flow_id);
__type(value, u64);
__uint(map_flags, BPF_F_NO_PREALLOC);
Expand Down
114 changes: 55 additions & 59 deletions bpf/pca.h
Original file line number Diff line number Diff line change
Expand Up @@ -3,119 +3,115 @@

#include "utils.h"

static int attach_packet_payload(void *data, void *data_end, struct __sk_buff *skb){
payload_meta meta;
u64 flags = BPF_F_CURRENT_CPU;
// Enable the flag to add packet header
// Packet payload follows immediately after the meta struct
u32 packetSize = (u32)(data_end-data);
// Record the current time.
u64 current_time = bpf_ktime_get_ns();
// For packets which are allocated non-linearly struct __sk_buff does not necessarily
// has all data lined up in memory but instead can be part of scatter gather lists.
// This command pulls data from the buffer but incurs data copying penalty.
if (packetSize <= skb->len){
packetSize = skb->len;
if (bpf_skb_pull_data(skb, skb->len)){
return TC_ACT_UNSPEC;
};
}
// Set flag's upper 32 bits with the size of the paylaod and the bpf_perf_event_output will
// attach the specified amount of bytes from packet to the perf event
// https://github.com/xdp-project/xdp-tutorial/tree/9b25f0a039179aca1f66cba5492744d9f09662c1/tracing04-xdp-tcpdump
flags |= (u64)packetSize << 32;

meta.if_index = skb->ifindex;
meta.pkt_len = packetSize;
meta.timestamp = current_time;
if (bpf_perf_event_output(skb, &packet_record, flags, &meta, sizeof(meta))){
return TC_ACT_OK;
}
return TC_ACT_UNSPEC;
// Emit packet metadata plus up to packetSize bytes of raw payload into the
// packet_record perf event array for userspace consumption.
// Returns TC_ACT_UNSPEC on the normal path and TC_ACT_OK when
// bpf_perf_event_output returns non-zero — see review note below.
static int attach_packet_payload(void *data, void *data_end, struct __sk_buff *skb) {
payload_meta meta;
// Lower 32 bits of flags select the CPU index; the upper 32 bits (set
// further down) tell bpf_perf_event_output how many packet bytes to
// append immediately after the meta struct.
u64 flags = BPF_F_CURRENT_CPU;
// Length of the linear data currently visible between data and data_end.
u32 packetSize = (u32)(data_end - data);

// Record the current time.
u64 current_time = bpf_ktime_get_ns();

// For packets which are allocated non-linearly struct __sk_buff does not necessarily
// have all data lined up in memory but instead can be part of scatter-gather lists.
// This command pulls data from the buffer but incurs a data-copying penalty.
if (packetSize <= skb->len) {
packetSize = skb->len;
if (bpf_skb_pull_data(skb, skb->len)) {
return TC_ACT_UNSPEC;
}; // NOTE(review): stray ';' after the if-block — harmless, could be removed
}
// Set flag's upper 32 bits with the size of the payload and the bpf_perf_event_output will
// attach the specified amount of bytes from packet to the perf event
// https://github.com/xdp-project/xdp-tutorial/tree/9b25f0a039179aca1f66cba5492744d9f09662c1/tracing04-xdp-tcpdump
flags |= (u64)packetSize << 32;

meta.if_index = skb->ifindex;
meta.pkt_len = packetSize;
meta.timestamp = current_time;
// NOTE(review): bpf_perf_event_output returns 0 on success, so this branch
// returns TC_ACT_OK only when the event was NOT submitted, while the
// success path falls through to TC_ACT_UNSPEC — confirm this inversion is
// intentional before relying on the return value.
if (bpf_perf_event_output(skb, &packet_record, flags, &meta, sizeof(meta))) {
return TC_ACT_OK;
}
return TC_ACT_UNSPEC;
}

static inline bool validate_pca_filter(u8 ipproto, void *ipheaderend, void *data_end){
static inline bool validate_pca_filter(u8 ipproto, void *ipheaderend, void *data_end) {
// If filters: pca_proto and pca_port are not specified, export packet
if (pca_proto == 0 && pca_port == 0)
return true;

//Only export packets with protocol set by ENV var PCA_FILTER
u16 sourcePort, destPort;
if (ipproto != pca_proto) {
return false;
return false;
}

if (ipproto == IPPROTO_TCP){
if (ipproto == IPPROTO_TCP) {
struct tcphdr *tcp_header = ipheaderend;
if ((void *)tcp_header + sizeof(*tcp_header) > data_end) {
return false;
}
sourcePort = tcp_header->source;
destPort = tcp_header->dest;
}
else if (ipproto == IPPROTO_UDP){
} else if (ipproto == IPPROTO_UDP) {
struct udphdr *udp_header = ipheaderend;
if ((void *)udp_header + sizeof(*udp_header) > data_end) {
return false;
return false;
}
sourcePort = udp_header->source;
destPort = udp_header->dest;
}
else if (ipproto == IPPROTO_SCTP){
} else if (ipproto == IPPROTO_SCTP) {
struct sctphdr *sctp_header = ipheaderend;
if ((void *)sctp_header + sizeof(*sctp_header) > data_end) {
return false;
}
sourcePort = sctp_header->source;
destPort = sctp_header->dest;
}
else {
return false;
}
sourcePort = sctp_header->source;
destPort = sctp_header->dest;
} else {
return false;
}
u16 pca_port_end = bpf_htons(pca_port);
if (sourcePort == pca_port_end || destPort == pca_port_end){
if (sourcePort == pca_port_end || destPort == pca_port_end) {
return true;
}
return false;
}

static inline int export_packet_payload (struct __sk_buff *skb) {
static inline int export_packet_payload(struct __sk_buff *skb) {
void *data_end = (void *)(long)skb->data_end;
void *data = (void *)(long)skb->data;
struct ethhdr *eth = data;
struct iphdr *ip;
struct ethhdr *eth = data;
struct iphdr *ip;

if ((void *)eth + sizeof(*eth) > data_end) {
return TC_ACT_UNSPEC;
return TC_ACT_UNSPEC;
}

// Only IPv4 and IPv6 packets captured
u16 ethType = bpf_ntohs(eth->h_proto);
if (ethType != ETH_P_IP && ethType != ETH_P_IPV6) {
return TC_ACT_UNSPEC;
}
return TC_ACT_UNSPEC;
}

ip = data + sizeof(*eth);
if ((void *)ip + sizeof(*ip) > data_end) {
return TC_ACT_UNSPEC;
return TC_ACT_UNSPEC;
}

if (validate_pca_filter(ip->protocol, (void *)ip + sizeof(*ip), data_end )){
if (validate_pca_filter(ip->protocol, (void *)ip + sizeof(*ip), data_end)) {
return attach_packet_payload(data, data_end, skb);
}
return TC_ACT_UNSPEC;
}


SEC("tc_pca_ingress")
int ingress_pca_parse (struct __sk_buff *skb) {
int ingress_pca_parse(struct __sk_buff *skb) {
return export_packet_payload(skb);
}

SEC("tc_pca_egress")
int egress_pca_parse (struct __sk_buff *skb) {
int egress_pca_parse(struct __sk_buff *skb) {
return export_packet_payload(skb);
}

Expand Down
Loading
Loading