batman-adv: upgrade package to latest release 2017.2
[feed/routing.git] / batman-adv / files / compat-hacks.h
1 /* Please avoid adding hacks here - instead add it to mac80211/backports.git */
2
3 #undef CONFIG_MODULE_STRIPPED
4
5 #include <linux/version.h> /* LINUX_VERSION_CODE */
6 #include <linux/types.h>
7
#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 1, 0)

/* dev_get_iflink() only exists from 4.1 on; older kernels expose the
 * lower device's ifindex directly as net_device::iflink.
 */
#define dev_get_iflink(_net_dev) ((_net_dev)->iflink)

#endif /* < KERNEL_VERSION(4, 1, 0) */
13
#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 16, 0)

/* Linux 3.15 misses the uapi include....
 * Pull it in explicitly so the NL80211_* definitions are available.
 */
#include <uapi/linux/nl80211.h>

#endif /* < KERNEL_VERSION(3, 16, 0) */
20
#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 9, 0)

#include <linux/netdevice.h>

/* Before 3.9 there was no upper/lower device linking API at all; map the
 * call onto the old netdev_set_master(). The BUILD_BUG_ON()s enforce at
 * compile time that callers only ever pass NULL for the two arguments the
 * old API cannot express.
 */
#define netdev_master_upper_dev_link(dev, upper_dev, upper_priv, upper_info) ({\
	BUILD_BUG_ON(upper_priv != NULL); \
	BUILD_BUG_ON(upper_info != NULL); \
	netdev_set_master(dev, upper_dev); \
})

#elif LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0)

#include <linux/netdevice.h>

/* 3.9 .. 4.4 have netdev_master_upper_dev_link(), but only with two
 * parameters; drop the extra arguments. A function-like macro is never
 * expanded recursively, so the inner call reaches the real kernel
 * function.
 */
#define netdev_master_upper_dev_link(dev, upper_dev, upper_priv, upper_info) ({\
	BUILD_BUG_ON(upper_priv != NULL); \
	BUILD_BUG_ON(upper_info != NULL); \
	netdev_master_upper_dev_link(dev, upper_dev); \
})

#endif /* < KERNEL_VERSION(4, 5, 0) */
42
43
#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 0, 0)

/* for batadv_v_elp_get_throughput which would have used
 * STATION_INFO_EXPECTED_THROUGHPUT in Linux 4.0.0
 */
#define NL80211_STA_INFO_EXPECTED_THROUGHPUT 28

/* wild hack for batadv_getlink_net only: textually rewrites the
 * ->get_link_net member access so that the resulting expression always
 * evaluates to the local "fallback_net" ("|| 1" forces the true branch;
 * the false branch only exists to keep the ternary type-consistent).
 * NOTE(review): relies on the exact expression shape at that single
 * call site — verify against batadv_getlink_net before touching either
 * side.
 */
#define get_link_net get_xstats_size || 1 ? fallback_net : (struct net*)netdev->rtnl_link_ops->get_xstats_size

#endif /* < KERNEL_VERSION(4, 0, 0) */
55
56
#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 2, 0)

/* Prototypes for multicast/checksum helpers that entered the kernel in
 * 4.2. On older kernels the implementations are presumably supplied by
 * the package's own compat sources — TODO confirm which object provides
 * them at link time.
 */
struct sk_buff *skb_checksum_trimmed(struct sk_buff *skb,
				     unsigned int transport_len,
				     __sum16(*skb_chkf)(struct sk_buff *skb));

int ip_mc_check_igmp(struct sk_buff *skb, struct sk_buff **skb_trimmed);

int ipv6_mc_check_mld(struct sk_buff *skb, struct sk_buff **skb_trimmed);

#endif /* < KERNEL_VERSION(4, 2, 0) */
68
#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 3, 0)

/* IFF_NO_QUEUE does not exist before 4.3. Expanding it to
 * "0; dev->tx_queue_len = 0" turns a statement such as
 * "dev->priv_flags |= IFF_NO_QUEUE;" into a no-op flag update followed by
 * clearing the queue length — the pre-4.3 way to request a queueless
 * device. NOTE(review): only valid at statement level with a "dev"
 * variable in scope; verify the call sites when updating.
 */
#define IFF_NO_QUEUE 0; dev->tx_queue_len = 0

/* Backport of the 4.3 helper: a "fake" hlist node is one that points back
 * at itself (its pprev references its own next field).
 */
static inline bool hlist_fake(struct hlist_node *h)
{
	return h->pprev == &h->next;
}

#endif /* < KERNEL_VERSION(4, 3, 0) */
79
#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0)

#include <linux/ethtool.h>

/* struct ethtool_link_ksettings (the new link-settings API) only exists
 * from 4.6 on; provide a minimal substitute carrying just the two fields
 * batman-adv reads: speed and duplex.
 */
#define ethtool_link_ksettings batadv_ethtool_link_ksettings

struct batadv_ethtool_link_ksettings {
	struct {
		__u32 speed;
		__u8 duplex;
	} base;
};

#define __ethtool_get_link_ksettings(__dev, __link_settings) \
	batadv_ethtool_get_link_ksettings(__dev, __link_settings)

/* Emulate __ethtool_get_link_ksettings() on top of the legacy
 * __ethtool_get_settings() API, copying over the fields of interest.
 *
 * Return: 0 on success, or the non-zero error code from the legacy call.
 */
static inline int
batadv_ethtool_get_link_ksettings(struct net_device *dev,
				  struct ethtool_link_ksettings *link_ksettings)
{
	struct ethtool_cmd cmd;
	int ret;

	memset(&cmd, 0, sizeof(cmd));
	ret = __ethtool_get_settings(dev, &cmd);

	if (ret != 0)
		return ret;

	link_ksettings->base.duplex = cmd.duplex;
	link_ksettings->base.speed = ethtool_cmd_speed(&cmd);

	return 0;
}

#endif /* < KERNEL_VERSION(4, 6, 0) */
116
117 #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 7, 0)
118
119 #define netif_trans_update batadv_netif_trans_update
120 static inline void batadv_netif_trans_update(struct net_device *dev)
121 {
122 dev->trans_start = jiffies;
123 }
124
125 #endif /* < KERNEL_VERSION(4, 7, 0) */
126
127
#include_next <linux/netlink.h>

#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 7, 0)

#include_next <net/netlink.h>

/* Backport of the 64-bit-aligned netlink attribute helpers that were
 * introduced in Linux 4.7 (nla_put_u64_64bit() and friends). On
 * architectures without efficient unaligned access, a zero-length pad
 * attribute is inserted so that the 64-bit payload ends up 8-byte
 * aligned.
 */

/* forward declaration; defined further down but used by the helpers in
 * between
 */
static inline bool batadv_nla_need_padding_for_64bit(struct sk_buff *skb);

/* Insert the zero-length pad attribute "padattr" when alignment is
 * required.
 *
 * Return: 0 on success, -EMSGSIZE when the skb has no room for the pad.
 */
static inline int batadv_nla_align_64bit(struct sk_buff *skb, int padattr)
{
	if (batadv_nla_need_padding_for_64bit(skb) &&
	    !nla_reserve(skb, padattr, 0))
		return -EMSGSIZE;

	return 0;
}

/* Reserve room for an attribute whose payload must be 64-bit aligned,
 * inserting the pad attribute first when necessary. Tailroom must have
 * been checked by the caller (see batadv_nla_put_64bit()).
 */
static inline struct nlattr *batadv__nla_reserve_64bit(struct sk_buff *skb,
						       int attrtype,
						       int attrlen, int padattr)
{
	if (batadv_nla_need_padding_for_64bit(skb))
		batadv_nla_align_64bit(skb, padattr);

	return __nla_reserve(skb, attrtype, attrlen);
}

/* Reserve an aligned attribute and copy "data" into its payload. */
static inline void batadv__nla_put_64bit(struct sk_buff *skb, int attrtype,
					 int attrlen, const void *data,
					 int padattr)
{
	struct nlattr *nla;

	nla = batadv__nla_reserve_64bit(skb, attrtype, attrlen, padattr);
	memcpy(nla_data(nla), data, attrlen);
}

/* Return true when a pad attribute is needed so that nla_data() of the
 * next attribute becomes 8-byte aligned. Can only ever be true on
 * architectures without efficient unaligned access.
 */
static inline bool batadv_nla_need_padding_for_64bit(struct sk_buff *skb)
{
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	/* The nlattr header is 4 bytes in size, that's why we test
	 * if the skb->data _is_ aligned. A NOP attribute, plus
	 * nlattr header for next attribute, will make nla_data()
	 * 8-byte aligned.
	 */
	if (IS_ALIGNED((unsigned long)skb_tail_pointer(skb), 8))
		return true;
#endif
	return false;
}

/* Worst-case space needed for a 64-bit-aligned attribute of "payload"
 * bytes, including the optional zero-length pad attribute.
 */
static inline int batadv_nla_total_size_64bit(int payload)
{
	return NLA_ALIGN(nla_attr_size(payload))
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
		+ NLA_ALIGN(nla_attr_size(0))
#endif
		;
}

/* Add a 64-bit-aligned attribute after verifying the skb has enough
 * tailroom for payload plus (potential) padding.
 *
 * Return: 0 on success, -EMSGSIZE when the attribute does not fit.
 */
static inline int batadv_nla_put_64bit(struct sk_buff *skb, int attrtype,
				       int attrlen, const void *data,
				       int padattr)
{
	size_t len;

	if (batadv_nla_need_padding_for_64bit(skb))
		len = batadv_nla_total_size_64bit(attrlen);
	else
		len = nla_total_size(attrlen);
	if (unlikely(skb_tailroom(skb) < len))
		return -EMSGSIZE;

	batadv__nla_put_64bit(skb, attrtype, attrlen, data, padattr);
	return 0;
}

/* Put a u64 attribute, padding with attribute "_padattr" when needed. */
#define nla_put_u64_64bit(_skb, _attrtype, _value, _padattr) \
	batadv_nla_put_u64_64bit(_skb, _attrtype, _value, _padattr)
static inline int batadv_nla_put_u64_64bit(struct sk_buff *skb, int attrtype,
					   u64 value, int padattr)
{
	return batadv_nla_put_64bit(skb, attrtype, sizeof(u64), &value,
				    padattr);
}

#endif /* < KERNEL_VERSION(4, 7, 0) */
215
216
#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)

#include_next <linux/cache.h>

/* hack for netlink.c which marked the family ops as ro */
#ifdef __ro_after_init
#undef __ro_after_init
#endif
/* Neutralize the annotation: kernels before 4.10 cannot handle the
 * genl family ops being marked __ro_after_init, so the objects simply
 * stay writable.
 */
#define __ro_after_init

#endif /* < KERNEL_VERSION(4, 10, 0) */
228
#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 9)

/* work around missing attribute needs_free_netdev and priv_destructor in
 * net_device
 *
 * The ether_setup() wrapper injects batadv_softif_free2() — which chains
 * batadv_softif_free() with free_netdev() — plus two unused dummy locals
 * t1/t2 into the setup function. The needs_free_netdev/priv_destructor
 * macros below then textually rewrite the later member assignments so
 * that both end up storing batadv_softif_free2 in the old
 * net_device::destructor, with the surplus assignment absorbed by the
 * dummy locals.
 * NOTE(review): this depends on the exact assignment statements at the
 * call sites (and on a "dev" parameter name) — verify those call sites
 * whenever they change.
 */
#define ether_setup(dev) \
	void batadv_softif_free2(struct net_device *dev) \
	{ \
		batadv_softif_free(dev); \
		free_netdev(dev); \
	} \
	void (*t1)(struct net_device *dev) __attribute__((unused)); \
	bool t2 __attribute__((unused)); \
	ether_setup(dev)
#define needs_free_netdev destructor = batadv_softif_free2; t2
#define priv_destructor destructor = batadv_softif_free2; t1

#endif /* < KERNEL_VERSION(4, 11, 9) */
247
248 #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 13, 0)
249
250 static inline void *batadv_skb_put(struct sk_buff *skb, unsigned int len)
251 {
252 return (void *)skb_put(skb, len);
253 }
254 #define skb_put batadv_skb_put
255
256 static inline void *skb_put_zero(struct sk_buff *skb, unsigned int len)
257 {
258 void *tmp = skb_put(skb, len);
259
260 memset(tmp, 0, len);
261
262 return tmp;
263 }
264
265 static inline void *skb_put_data(struct sk_buff *skb, const void *data,
266 unsigned int len)
267 {
268 void *tmp = skb_put(skb, len);
269
270 memcpy(tmp, data, len);
271
272 return tmp;
273 }
274
275 #endif /* < KERNEL_VERSION(4, 13, 0) */
276
/* <DECLARE_EWMA> */

#include <linux/version.h>
#include_next <linux/average.h>

#include <linux/bug.h>

/* Unconditionally replace the kernel's DECLARE_EWMA with the
 * (name, precision, weight_rcp) variant so batman-adv can use a single
 * macro signature on every supported kernel.
 * NOTE(review): older kernels ship a DECLARE_EWMA with a different
 * parameter set — confirm against the target kernels' <linux/average.h>
 * when bumping the supported range.
 */
#ifdef DECLARE_EWMA
#undef DECLARE_EWMA
#endif /* DECLARE_EWMA */

/*
 * Exponentially weighted moving average (EWMA)
 *
 * This implements a fixed-precision EWMA algorithm, with both the
 * precision and fall-off coefficient determined at compile-time
 * and built into the generated helper functions.
 *
 * The first argument to the macro is the name that will be used
 * for the struct and helper functions.
 *
 * The second argument, the precision, expresses how many bits are
 * used for the fractional part of the fixed-precision values.
 *
 * The third argument, the weight reciprocal, determines how the
 * new values will be weighed vs. the old state, new values will
 * get weight 1/weight_rcp and old values 1-1/weight_rcp. Note
 * that this parameter must be a power of two for efficiency.
 */

#define DECLARE_EWMA(name, _precision, _weight_rcp) \
	struct ewma_##name { \
		unsigned long internal; \
	}; \
	static inline void ewma_##name##_init(struct ewma_##name *e) \
	{ \
		BUILD_BUG_ON(!__builtin_constant_p(_precision)); \
		BUILD_BUG_ON(!__builtin_constant_p(_weight_rcp)); \
		/* \
		 * Even if you want to feed it just 0/1 you should have \
		 * some bits for the non-fractional part... \
		 */ \
		BUILD_BUG_ON((_precision) > 30); \
		BUILD_BUG_ON_NOT_POWER_OF_2(_weight_rcp); \
		e->internal = 0; \
	} \
	static inline unsigned long \
	ewma_##name##_read(struct ewma_##name *e) \
	{ \
		BUILD_BUG_ON(!__builtin_constant_p(_precision)); \
		BUILD_BUG_ON(!__builtin_constant_p(_weight_rcp)); \
		BUILD_BUG_ON((_precision) > 30); \
		BUILD_BUG_ON_NOT_POWER_OF_2(_weight_rcp); \
		return e->internal >> (_precision); \
	} \
	static inline void ewma_##name##_add(struct ewma_##name *e, \
					     unsigned long val) \
	{ \
		unsigned long internal = ACCESS_ONCE(e->internal); \
		unsigned long weight_rcp = ilog2(_weight_rcp); \
		unsigned long precision = _precision; \
		\
		BUILD_BUG_ON(!__builtin_constant_p(_precision)); \
		BUILD_BUG_ON(!__builtin_constant_p(_weight_rcp)); \
		BUILD_BUG_ON((_precision) > 30); \
		BUILD_BUG_ON_NOT_POWER_OF_2(_weight_rcp); \
		\
		ACCESS_ONCE(e->internal) = internal ? \
			(((internal << weight_rcp) - internal) + \
				(val << precision)) >> weight_rcp : \
			(val << precision); \
	}

/* </DECLARE_EWMA> */