+check_flow(struct qosify_config *config, struct __sk_buff *skb,
+ struct qosify_dscp_val *out_val)
+{
+ /*
+ * Per-flow classification: look up (or create) the flow bucket keyed
+ * by the skb's flow hash, run the bulk and/or prio detectors that
+ * out_val->flags request, and let the flow's stored value override
+ * *out_val when it carries one of the requested flags.
+ */
+ struct flow_bucket flow_data;
+ struct flow_bucket *flow;
+ __u32 hash;
+
+ /* Nothing to do unless the caller asked for a prio or bulk check */
+ if (!(out_val->flags & (QOSIFY_VAL_FLAG_PRIO_CHECK |
+ QOSIFY_VAL_FLAG_BULK_CHECK)))
+ return;
+
+ if (!config)
+ return;
+
+ /* Flow bucket is keyed by the kernel-computed skb flow hash */
+ hash = bpf_get_hash_recalc(skb);
+ flow = bpf_map_lookup_elem(&flow_map, &hash);
+ if (!flow) {
+ /*
+ * Lookup-or-create: insert a zeroed bucket, then look it up
+ * again to obtain a pointer to the copy that now lives in the
+ * map (bpf_map_update_elem copies the value into the map).
+ * The second lookup can still fail (e.g. map full), so bail
+ * out quietly in that case.
+ */
+ memset(&flow_data, 0, sizeof(flow_data));
+ bpf_map_update_elem(&flow_map, &hash, &flow_data, BPF_ANY);
+ flow = bpf_map_lookup_elem(&flow_map, &hash);
+ if (!flow)
+ return;
+ }
+
+
+ if (out_val->flags & QOSIFY_VAL_FLAG_BULK_CHECK)
+ check_flow_bulk(config, skb, flow, out_val);
+ if (out_val->flags & QOSIFY_VAL_FLAG_PRIO_CHECK)
+ check_flow_prio(config, skb, flow, out_val);
+
+ /* Stored flow state wins if it matches any requested check flag */
+ if (flow->val.flags & out_val->flags)
+ *out_val = flow->val;
+}
+
+static __always_inline struct qosify_ip_map_val *
+parse_ipv4(struct qosify_config *config, struct __sk_buff *skb, __u32 *offset,
+ bool ingress, struct qosify_dscp_val *out_val)