batman-adv: Prevent FTBFS when redefining ether_setup
[feed/routing.git] batman-adv/files/compat-hacks.h
/* Please avoid adding hacks here - instead add them to mac80211/backports.git */

#undef CONFIG_MODULE_STRIPPED

#include <linux/version.h> /* LINUX_VERSION_CODE */
#include <linux/types.h>

#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 1, 0)

#define dev_get_iflink(_net_dev) ((_net_dev)->iflink)

#endif /* < KERNEL_VERSION(4, 1, 0) */

#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 16, 0)

/* Linux 3.15 is missing the uapi include */
#include <uapi/linux/nl80211.h>

#endif /* < KERNEL_VERSION(3, 16, 0) */

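/* Map the four-argument netdev_master_upper_dev_link() introduced in 4.5
 * onto the older kernel APIs; the BUILD_BUG_ON()s make sure batman-adv never
 * passes upper_priv/upper_info values that these kernels cannot honour.
 */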
#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 9, 0)

#include <linux/netdevice.h>

#define netdev_master_upper_dev_link(dev, upper_dev, upper_priv, upper_info) ({\
        BUILD_BUG_ON(upper_priv != NULL); \
        BUILD_BUG_ON(upper_info != NULL); \
        netdev_set_master(dev, upper_dev); \
})

#elif LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0)

#include <linux/netdevice.h>

#define netdev_master_upper_dev_link(dev, upper_dev, upper_priv, upper_info) ({\
        BUILD_BUG_ON(upper_priv != NULL); \
        BUILD_BUG_ON(upper_info != NULL); \
        netdev_master_upper_dev_link(dev, upper_dev); \
})

#endif /* < KERNEL_VERSION(4, 5, 0) */


#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 0, 0)

/* for batadv_v_elp_get_throughput which would have used
 * STATION_INFO_EXPECTED_THROUGHPUT in Linux 4.0.0
 */
#define NL80211_STA_INFO_EXPECTED_THROUGHPUT 28

/* wild hack for batadv_getlink_net only */
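/* The define below rewrites every "->get_link_net" reference in
 * batadv_getlink_net() into a ternary whose "|| 1" condition is always true,
 * so the expression always evaluates to the fallback_net variable that is in
 * scope at those call sites; pre-4.0 kernels have no
 * rtnl_link_ops->get_link_net member to call.
 */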
#define get_link_net get_xstats_size || 1 ? fallback_net : (struct net*)netdev->rtnl_link_ops->get_xstats_size

#endif /* < KERNEL_VERSION(4, 0, 0) */


#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 2, 0)

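/* skb_checksum_trimmed(), ip_mc_check_igmp() and ipv6_mc_check_mld() only
 * exist since 4.2; only the prototypes live here, the backported bodies are
 * presumably provided by the accompanying compat sources.
 */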
struct sk_buff *skb_checksum_trimmed(struct sk_buff *skb,
                                     unsigned int transport_len,
                                     __sum16(*skb_chkf)(struct sk_buff *skb));

int ip_mc_check_igmp(struct sk_buff *skb, struct sk_buff **skb_trimmed);

int ipv6_mc_check_mld(struct sk_buff *skb, struct sk_buff **skb_trimmed);

#endif /* < KERNEL_VERSION(4, 2, 0) */

#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 3, 0)

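/* IFF_NO_QUEUE does not exist before 4.3. Assuming the only user is a
 * "dev->priv_flags |= IFF_NO_QUEUE;" statement with a local net_device
 * pointer called dev, the define turns it into
 * "dev->priv_flags |= 0; dev->tx_queue_len = 0;" - the old way of getting
 * noqueue behaviour.
 */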
#define IFF_NO_QUEUE 0; dev->tx_queue_len = 0

static inline bool hlist_fake(struct hlist_node *h)
{
        return h->pprev == &h->next;
}

#endif /* < KERNEL_VERSION(4, 3, 0) */

#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0)

#include <linux/ethtool.h>

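/* Emulate the ethtool_link_ksettings API (4.6+) with just the speed/duplex
 * fields used here, backed by the legacy __ethtool_get_settings() call.
 */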
#define ethtool_link_ksettings batadv_ethtool_link_ksettings

struct batadv_ethtool_link_ksettings {
        struct {
                __u32 speed;
                __u8 duplex;
        } base;
};

#define __ethtool_get_link_ksettings(__dev, __link_settings) \
        batadv_ethtool_get_link_ksettings(__dev, __link_settings)

static inline int
batadv_ethtool_get_link_ksettings(struct net_device *dev,
                                  struct ethtool_link_ksettings *link_ksettings)
{
        struct ethtool_cmd cmd;
        int ret;

        memset(&cmd, 0, sizeof(cmd));
        ret = __ethtool_get_settings(dev, &cmd);

        if (ret != 0)
                return ret;

        link_ksettings->base.duplex = cmd.duplex;
        link_ksettings->base.speed = ethtool_cmd_speed(&cmd);

        return 0;
}

#endif /* < KERNEL_VERSION(4, 6, 0) */

#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 7, 0)

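/* netif_trans_update() only appeared in 4.7, when trans_start moved into the
 * per-queue structures; older kernels still carry the field directly in
 * struct net_device.
 */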
#define netif_trans_update batadv_netif_trans_update
static inline void batadv_netif_trans_update(struct net_device *dev)
{
        dev->trans_start = jiffies;
}

#endif /* < KERNEL_VERSION(4, 7, 0) */


#include_next <linux/netlink.h>

#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 7, 0)

#include_next <net/netlink.h>

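/* Backport of the nla_put_u64_64bit()/64-bit padding helpers added in 4.7,
 * carrying a batadv_ prefix so they cannot clash with other definitions.
 */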
static inline bool batadv_nla_need_padding_for_64bit(struct sk_buff *skb);

static inline int batadv_nla_align_64bit(struct sk_buff *skb, int padattr)
{
        if (batadv_nla_need_padding_for_64bit(skb) &&
            !nla_reserve(skb, padattr, 0))
                return -EMSGSIZE;

        return 0;
}

static inline struct nlattr *batadv__nla_reserve_64bit(struct sk_buff *skb,
                                                       int attrtype,
                                                       int attrlen, int padattr)
{
        if (batadv_nla_need_padding_for_64bit(skb))
                batadv_nla_align_64bit(skb, padattr);

        return __nla_reserve(skb, attrtype, attrlen);
}

static inline void batadv__nla_put_64bit(struct sk_buff *skb, int attrtype,
                                         int attrlen, const void *data,
                                         int padattr)
{
        struct nlattr *nla;

        nla = batadv__nla_reserve_64bit(skb, attrtype, attrlen, padattr);
        memcpy(nla_data(nla), data, attrlen);
}

static inline bool batadv_nla_need_padding_for_64bit(struct sk_buff *skb)
{
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
        /* The nlattr header is 4 bytes in size, that's why we test
         * if the skb->data _is_ aligned. A NOP attribute, plus
         * nlattr header for next attribute, will make nla_data()
         * 8-byte aligned.
         */
        if (IS_ALIGNED((unsigned long)skb_tail_pointer(skb), 8))
                return true;
#endif
        return false;
}

static inline int batadv_nla_total_size_64bit(int payload)
{
        return NLA_ALIGN(nla_attr_size(payload))
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
                + NLA_ALIGN(nla_attr_size(0))
#endif
                ;
}

static inline int batadv_nla_put_64bit(struct sk_buff *skb, int attrtype,
                                       int attrlen, const void *data,
                                       int padattr)
{
        size_t len;

        if (batadv_nla_need_padding_for_64bit(skb))
                len = batadv_nla_total_size_64bit(attrlen);
        else
                len = nla_total_size(attrlen);
        if (unlikely(skb_tailroom(skb) < len))
                return -EMSGSIZE;

        batadv__nla_put_64bit(skb, attrtype, attrlen, data, padattr);
        return 0;
}

#define nla_put_u64_64bit(_skb, _attrtype, _value, _padattr) \
        batadv_nla_put_u64_64bit(_skb, _attrtype, _value, _padattr)
static inline int batadv_nla_put_u64_64bit(struct sk_buff *skb, int attrtype,
                                           u64 value, int padattr)
{
        return batadv_nla_put_64bit(skb, attrtype, sizeof(u64), &value,
                                    padattr);
}

#endif /* < KERNEL_VERSION(4, 7, 0) */


#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)

#include_next <linux/cache.h>

/* hack for netlink.c which marked the family ops as ro */
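/* With the attribute defined away, the netlink family/ops objects simply
 * stay in ordinary writable data on pre-4.10 kernels, which do not know
 * __ro_after_init.
 */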
#ifdef __ro_after_init
#undef __ro_after_init
#endif
#define __ro_after_init

#endif /* < KERNEL_VERSION(4, 10, 0) */

#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 9)

#include <linux/netdevice.h>

/* work around missing attribute needs_free_netdev and priv_destructor in
 * net_device
 */
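/* How the trio below fits together, assuming the call site invokes
 * ether_setup(dev) before assigning needs_free_netdev and priv_destructor
 * (which is what batman-adv's soft-interface setup does): the redefined
 * ether_setup() first defines batadv_softif_free2() at the call site - a
 * combined destructor that frees the private data and then the netdev
 * itself, as pre-4.11.9 kernels expect from dev->destructor - plus two
 * unused dummy variables t1/t2, and only then calls the real ether_setup().
 * The two defines underneath turn
 *
 *   dev->needs_free_netdev = true;
 *   dev->priv_destructor = batadv_softif_free;
 *
 * into assignments of batadv_softif_free2 to dev->destructor, with the
 * leftover right-hand sides soaked up by t2 and t1.
 */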
#define ether_setup(dev) \
        void batadv_softif_free2(struct net_device *dev) \
        { \
                batadv_softif_free(dev); \
                free_netdev(dev); \
        } \
        void (*t1)(struct net_device *dev) __attribute__((unused)); \
        bool t2 __attribute__((unused)); \
        ether_setup(dev)
#define needs_free_netdev destructor = batadv_softif_free2; t2
#define priv_destructor destructor = batadv_softif_free2; t1

#endif /* < KERNEL_VERSION(4, 11, 9) */

#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 13, 0)

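/* Before 4.13 skb_put() returned unsigned char * and skb_put_zero()/
 * skb_put_data() did not exist yet; provide them with the 4.13 void *
 * semantics.
 */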
static inline void *batadv_skb_put(struct sk_buff *skb, unsigned int len)
{
        return (void *)skb_put(skb, len);
}
#define skb_put batadv_skb_put

static inline void *skb_put_zero(struct sk_buff *skb, unsigned int len)
{
        void *tmp = skb_put(skb, len);

        memset(tmp, 0, len);

        return tmp;
}

static inline void *skb_put_data(struct sk_buff *skb, const void *data,
                                 unsigned int len)
{
        void *tmp = skb_put(skb, len);

        memcpy(tmp, data, len);

        return tmp;
}

#endif /* < KERNEL_VERSION(4, 13, 0) */

/* <DECLARE_EWMA> */

#include <linux/version.h>
#include_next <linux/average.h>

#include <linux/bug.h>

#ifdef DECLARE_EWMA
#undef DECLARE_EWMA
#endif /* DECLARE_EWMA */

/*
 * Exponentially weighted moving average (EWMA)
 *
 * This implements a fixed-precision EWMA algorithm, with both the
 * precision and fall-off coefficient determined at compile-time
 * and built into the generated helper functions.
 *
 * The first argument to the macro is the name that will be used
 * for the struct and helper functions.
 *
 * The second argument, the precision, expresses how many bits are
 * used for the fractional part of the fixed-precision values.
 *
 * The third argument, the weight reciprocal, determines how the
 * new values will be weighted vs. the old state: new values will
 * get weight 1/weight_rcp and old values 1-1/weight_rcp. Note
 * that this parameter must be a power of two for efficiency.
 */

#define DECLARE_EWMA(name, _precision, _weight_rcp) \
        struct ewma_##name { \
                unsigned long internal; \
        }; \
        static inline void ewma_##name##_init(struct ewma_##name *e) \
        { \
                BUILD_BUG_ON(!__builtin_constant_p(_precision)); \
                BUILD_BUG_ON(!__builtin_constant_p(_weight_rcp)); \
                /* \
                 * Even if you want to feed it just 0/1 you should have \
                 * some bits for the non-fractional part... \
                 */ \
                BUILD_BUG_ON((_precision) > 30); \
                BUILD_BUG_ON_NOT_POWER_OF_2(_weight_rcp); \
                e->internal = 0; \
        } \
        static inline unsigned long \
        ewma_##name##_read(struct ewma_##name *e) \
        { \
                BUILD_BUG_ON(!__builtin_constant_p(_precision)); \
                BUILD_BUG_ON(!__builtin_constant_p(_weight_rcp)); \
                BUILD_BUG_ON((_precision) > 30); \
                BUILD_BUG_ON_NOT_POWER_OF_2(_weight_rcp); \
                return e->internal >> (_precision); \
        } \
        static inline void ewma_##name##_add(struct ewma_##name *e, \
                                             unsigned long val) \
        { \
                unsigned long internal = ACCESS_ONCE(e->internal); \
                unsigned long weight_rcp = ilog2(_weight_rcp); \
                unsigned long precision = _precision; \
                \
                BUILD_BUG_ON(!__builtin_constant_p(_precision)); \
                BUILD_BUG_ON(!__builtin_constant_p(_weight_rcp)); \
                BUILD_BUG_ON((_precision) > 30); \
                BUILD_BUG_ON_NOT_POWER_OF_2(_weight_rcp); \
                \
                ACCESS_ONCE(e->internal) = internal ? \
                        (((internal << weight_rcp) - internal) + \
                         (val << precision)) >> weight_rcp : \
                        (val << precision); \
        }
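/* Example of the generated helpers (hypothetical instantiation, shown for
 * illustration only; 10 fractional bits, new samples weighted 1/8):
 *
 *   DECLARE_EWMA(throughput, 10, 8)
 *
 *   struct ewma_throughput ewma;
 *   ewma_throughput_init(&ewma);
 *   ewma_throughput_add(&ewma, 100);
 *   unsigned long avg = ewma_throughput_read(&ewma);
 */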

/* </DECLARE_EWMA> */