/* Please avoid adding hacks here - instead add them to mac80211/backports.git */

#undef CONFIG_MODULE_STRIPPED

#include <linux/version.h> /* LINUX_VERSION_CODE */
#include <linux/types.h>

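/* dev_get_iflink() only exists since 4.1; older kernels still expose the
 * iflink member on struct net_device directly.
 */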
#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 1, 0)

#define dev_get_iflink(_net_dev) ((_net_dev)->iflink)

#endif /* < KERNEL_VERSION(4, 1, 0) */

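/* netdev_master_upper_dev_link() only exists since 3.9 (before that
 * netdev_set_master() was used) and only grew the upper_priv/upper_info
 * arguments in 4.5. The BUILD_BUG_ON()s make sure callers only ever pass
 * NULL for the arguments the older kernels cannot handle.
 */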
#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 9, 0)

#include <linux/netdevice.h>

#define netdev_master_upper_dev_link(dev, upper_dev, upper_priv, upper_info) ({\
	BUILD_BUG_ON(upper_priv != NULL); \
	BUILD_BUG_ON(upper_info != NULL); \
	netdev_set_master(dev, upper_dev); \
})

#elif LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0)

#include <linux/netdevice.h>

#define netdev_master_upper_dev_link(dev, upper_dev, upper_priv, upper_info) ({\
	BUILD_BUG_ON(upper_priv != NULL); \
	BUILD_BUG_ON(upper_info != NULL); \
	netdev_master_upper_dev_link(dev, upper_dev); \
})

#endif /* < KERNEL_VERSION(4, 5, 0) */

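/* skb_checksum_trimmed(), ip_mc_check_igmp() and ipv6_mc_check_mld() were
 * only added in 4.2. Only the declarations are needed here; the backported
 * implementations are expected to be provided separately by this package's
 * compat sources.
 */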
#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 2, 0)

struct sk_buff *skb_checksum_trimmed(struct sk_buff *skb,
				     unsigned int transport_len,
				     __sum16(*skb_chkf)(struct sk_buff *skb));

int ip_mc_check_igmp(struct sk_buff *skb, struct sk_buff **skb_trimmed);

int ipv6_mc_check_mld(struct sk_buff *skb, struct sk_buff **skb_trimmed);

#endif /* < KERNEL_VERSION(4, 2, 0) */

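/* IFF_NO_QUEUE only exists since 4.3. This hack relies on the define being
 * used as "dev->priv_flags |= IFF_NO_QUEUE;", which then expands to
 * "dev->priv_flags |= 0; dev->tx_queue_len = 0;" - the pre-4.3 way of
 * marking an interface as queueless.
 */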
#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 3, 0)

#define IFF_NO_QUEUE 0; dev->tx_queue_len = 0

#endif /* < KERNEL_VERSION(4, 3, 0) */

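/* struct ethtool_link_ksettings and __ethtool_get_link_ksettings() only
 * exist since 4.6. Emulate the subset needed here (base.speed and
 * base.duplex) on top of the legacy __ethtool_get_settings() and
 * struct ethtool_cmd API.
 */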
#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0)

#include <linux/ethtool.h>

#define ethtool_link_ksettings batadv_ethtool_link_ksettings

struct batadv_ethtool_link_ksettings {
	struct {
		__u32	speed;
		__u8	duplex;
	} base;
};

#define __ethtool_get_link_ksettings(__dev, __link_settings) \
	batadv_ethtool_get_link_ksettings(__dev, __link_settings)

static inline int
batadv_ethtool_get_link_ksettings(struct net_device *dev,
				  struct ethtool_link_ksettings *link_ksettings)
{
	struct ethtool_cmd cmd;
	int ret;

	memset(&cmd, 0, sizeof(cmd));
	ret = __ethtool_get_settings(dev, &cmd);

	if (ret != 0)
		return ret;

	link_ksettings->base.duplex = cmd.duplex;
	link_ksettings->base.speed = ethtool_cmd_speed(&cmd);

	return 0;
}

#endif /* < KERNEL_VERSION(4, 6, 0) */

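/* netif_trans_update() only exists since 4.7; on older kernels the
 * trans_start member is still written directly.
 */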
#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 7, 0)

#define netif_trans_update batadv_netif_trans_update
static inline void batadv_netif_trans_update(struct net_device *dev)
{
	dev->trans_start = jiffies;
}

#endif /* < KERNEL_VERSION(4, 7, 0) */


#include_next <linux/netlink.h>

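/* The 64-bit aligned netlink attribute helpers (nla_put_u64_64bit() and
 * friends) only exist since 4.7. Provide batadv_-prefixed fallbacks
 * modelled after the upstream helpers.
 */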
#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 7, 0)

#include_next <net/netlink.h>

static inline bool batadv_nla_need_padding_for_64bit(struct sk_buff *skb);

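/* Reserve a zero-length padding attribute when needed so that the payload
 * of the 64-bit attribute emitted next ends up 8-byte aligned. Padding is
 * only required on architectures without efficient unaligned access.
 */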
static inline int batadv_nla_align_64bit(struct sk_buff *skb, int padattr)
{
	if (batadv_nla_need_padding_for_64bit(skb) &&
	    !nla_reserve(skb, padattr, 0))
		return -EMSGSIZE;

	return 0;
}

static inline struct nlattr *batadv__nla_reserve_64bit(struct sk_buff *skb,
						       int attrtype,
						       int attrlen, int padattr)
{
	if (batadv_nla_need_padding_for_64bit(skb))
		batadv_nla_align_64bit(skb, padattr);

	return __nla_reserve(skb, attrtype, attrlen);
}

static inline void batadv__nla_put_64bit(struct sk_buff *skb, int attrtype,
					 int attrlen, const void *data,
					 int padattr)
{
	struct nlattr *nla;

	nla = batadv__nla_reserve_64bit(skb, attrtype, attrlen, padattr);
	memcpy(nla_data(nla), data, attrlen);
}

static inline bool batadv_nla_need_padding_for_64bit(struct sk_buff *skb)
{
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	/* The nlattr header is 4 bytes in size, that's why we test
	 * if the skb->data _is_ aligned. A NOP attribute, plus
	 * nlattr header for next attribute, will make nla_data()
	 * 8-byte aligned.
	 */
	if (IS_ALIGNED((unsigned long)skb_tail_pointer(skb), 8))
		return true;
#endif
	return false;
}

static inline int batadv_nla_total_size_64bit(int payload)
{
	return NLA_ALIGN(nla_attr_size(payload))
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
		+ NLA_ALIGN(nla_attr_size(0))
#endif
		;
}

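/* Like nla_put(), but accounts for the optional padding attribute when
 * checking the available tailroom.
 */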
static inline int batadv_nla_put_64bit(struct sk_buff *skb, int attrtype,
				       int attrlen, const void *data,
				       int padattr)
{
	size_t len;

	if (batadv_nla_need_padding_for_64bit(skb))
		len = batadv_nla_total_size_64bit(attrlen);
	else
		len = nla_total_size(attrlen);
	if (unlikely(skb_tailroom(skb) < len))
		return -EMSGSIZE;

	batadv__nla_put_64bit(skb, attrtype, attrlen, data, padattr);
	return 0;
}

#define nla_put_u64_64bit(_skb, _attrtype, _value, _padattr) \
	batadv_nla_put_u64_64bit(_skb, _attrtype, _value, _padattr)
static inline int batadv_nla_put_u64_64bit(struct sk_buff *skb, int attrtype,
					   u64 value, int padattr)
{
	return batadv_nla_put_64bit(skb, attrtype, sizeof(u64), &value,
				    padattr);
}

#endif /* < KERNEL_VERSION(4, 7, 0) */