39538b122ead7442f5600777d6af94db5448ccc6
openwrt/openwrt.git: package/kernel/mac80211/patches/subsys/331-mac80211-rework-the-airtime-fairness-implementation.patch
From: Felix Fietkau <nbd@nbd.name>
Date: Sat, 28 May 2022 16:51:51 +0200
Subject: [PATCH] mac80211: rework the airtime fairness implementation

The current ATF implementation has a number of issues which have shown up
during testing. Since it does not take into account the AQL budget of
pending packets, the implementation might queue up large numbers of packets
for a single txq until airtime gets reported after tx completion.
The same then happens to the next txq afterwards. While the end result could
still be considered fair, the bursty behavior introduces a large amount of
latency.
The current code also tries to avoid frequent re-sorting of txq entries in
order to avoid having to re-balance the rbtree often.

In order to fix these issues, introduce skip lists as a data structure, which
offer lookup/insert/delete times similar to an rbtree, but avoid the need for
rebalancing by being probabilistic.
Use this to keep tx entries sorted by virtual time + pending AQL budget and
re-sort after each ieee80211_return_txq call.

Since multiple txqs share a single air_time struct with a virtual time value,
switch the active_txqs list to queue up air_time structs instead of queues.
This helps avoid imbalance between shared txqs by servicing them round robin.

ieee80211_next_txq now only dequeues the first element of active_txqs. To
make that work for non-AQL or non-ATF drivers as well, add the estimated tx
airtime directly to the air_info virtual time if either AQL or ATF is not
supported.

Signed-off-by: Felix Fietkau <nbd@nbd.name>
---
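
Note on the new sort key (illustrative, not part of the upstream commit
message): conceptually, each airtime_info is now sorted by

	v_t_cur = v_t + pending_aql_airtime / weight

where the division is implemented via the precomputed weight reciprocal
(see __ieee80211_insert_txq below). A small worked example, assuming two
stations of equal weight: station A has v_t = 1000 us with 200 us of AQL
budget still pending (key 1200), station B has v_t = 1100 us with nothing
pending (key 1100). B is now scheduled first, whereas sorting on v_t alone
would keep picking A first until its pending airtime is finally reported
on tx completion.
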
 create mode 100644 include/linux/skiplist.h

--- /dev/null
+++ b/include/linux/skiplist.h
@@ -0,0 +1,283 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * A skip list is a probabilistic alternative to balanced trees. Unlike a
+ * red-black tree, it does not require rebalancing.
+ *
+ * This implementation uses only unidirectional next pointers and is optimized
+ * for use in a priority queue where elements are mostly deleted from the front
+ * of the queue.
+ *
+ * When storing up to 2^n elements in an n-level skiplist, lookup and deletion
+ * of the first element happen in O(1) time. Other insertions and deletions
+ * take O(log n) time, assuming that the number of elements for an n-level
+ * list does not exceed 2^n.
+ *
+ * Usage:
+ * DECLARE_SKIPLIST_TYPE(foo, 5) will define the data types for a 5-level list:
+ *	struct foo_list: the list data type
+ *	struct foo_node: the node data for an element in the list
+ *
+ * DECLARE_SKIPLIST_IMPL(foo, foo_cmp_fn)
+ *
+ * Adds the skip list implementation. It depends on a provided function:
+ * int foo_cmp_fn(struct foo_list *list, struct foo_node *n1, struct foo_node *n2)
+ * This compares two elements given by their node pointers, returning <0 if
+ * n1 sorts before n2, 0 if they are equal, and >0 if n1 sorts after n2.
+ *
+ * This macro implements the following functions:
+ *
+ * void foo_list_init(struct foo_list *list)
+ *	initializes the skip list
+ *
+ * void foo_node_init(struct foo_node *node)
+ *	initializes a node. Must be called before adding the node to the list
+ *
+ * struct foo_node *foo_node_next(struct foo_node *node)
+ *	gets the node directly after the provided node, or NULL if it was the
+ *	last element in the list.
+ *
+ * bool foo_is_queued(struct foo_node *node)
+ *	returns true if the node is on a list
+ *
+ * struct foo_node *foo_dequeue(struct foo_list *list)
+ *	deletes and returns the first element of the list (or returns NULL if
+ *	empty)
+ *
+ * struct foo_node *foo_peek(struct foo_list *list)
+ *	returns the first element of the list without removing it
+ *
+ * void foo_insert(struct foo_list *list, struct foo_node *node)
+ *	inserts the node into the list. The node must be initialized and must
+ *	not already be on a list.
+ *
+ * void foo_delete(struct foo_list *list, struct foo_node *node)
+ *	deletes the node from the list, or does nothing if it's not on the list
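+ *
+ * Example (a minimal usage sketch; the "foo" item type and foo_cmp_fn
+ * below are hypothetical, not part of this header):
+ *
+ *	DECLARE_SKIPLIST_TYPE(foo, 5);
+ *
+ *	struct foo_item {
+ *		struct foo_node node;
+ *		int key;
+ *	};
+ *
+ *	static int foo_cmp_fn(struct foo_list *list,
+ *			      struct foo_node *n1, struct foo_node *n2)
+ *	{
+ *		return container_of(n1, struct foo_item, node)->key -
+ *		       container_of(n2, struct foo_item, node)->key;
+ *	}
+ *
+ *	DECLARE_SKIPLIST_IMPL(foo, foo_cmp_fn);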
+ */
+#ifndef __SKIPLIST_H
+#define __SKIPLIST_H
+
+#include <linux/bits.h>
+#include <linux/minmax.h>
+#include <linux/bug.h>
+#include <linux/prandom.h>
+
+#define SKIPLIST_POISON ((void *)1)
+
+#define DECLARE_SKIPLIST_TYPE(name, levels) \
+struct name##_node { \
+	struct name##_node *next[levels]; \
+}; \
+struct name##_list { \
+	struct name##_node head; \
+	unsigned int max_level; \
+	unsigned int count; \
+};
+
+#define DECLARE_SKIPLIST_IMPL(name, cmp_fn) \
+static inline void \
+name##_list_init(struct name##_list *list) \
+{ \
+	memset(list, 0, sizeof(*list)); \
+} \
+static inline void \
+name##_node_init(struct name##_node *node) \
+{ \
+	node->next[0] = SKIPLIST_POISON; \
+} \
+static inline struct name##_node * \
+name##_node_next(struct name##_node *node) \
+{ \
+	return node->next[0]; \
+} \
+static inline bool \
+name##_is_queued(struct name##_node *node) \
+{ \
+	return node->next[0] != SKIPLIST_POISON; \
+} \
+static inline int \
+__skiplist_##name##_cmp_impl(void *head, void *n1, void *n2) \
+{ \
+	return cmp_fn(head, n1, n2); \
+} \
+static inline void \
+__##name##_delete(struct name##_list *list) \
+{ \
+	list->count--; \
+	while (list->max_level && \
+	       !list->head.next[list->max_level]) \
+		list->max_level--; \
+} \
+static inline struct name##_node * \
+name##_dequeue(struct name##_list *list) \
+{ \
+	struct name##_node *ret; \
+	unsigned int max_level = ARRAY_SIZE(list->head.next) - 1; \
+	ret = (void *)__skiplist_dequeue((void **)&list->head, \
+					 max_level); \
+	if (!ret) \
+		return NULL; \
+	__##name##_delete(list); \
+	return ret; \
+} \
+static inline struct name##_node * \
+name##_peek(struct name##_list *list) \
+{ \
+	return list->head.next[0]; \
+} \
+static inline void \
+name##_insert(struct name##_list *list, struct name##_node *node) \
+{ \
+	int level = __skiplist_level(ARRAY_SIZE(list->head.next) - 1, \
+				     list->count, prandom_u32()); \
+	level = min_t(int, level, list->max_level + 1); \
+	__skiplist_insert((void *)&list->head, (void *)node, level, \
+			  __skiplist_##name##_cmp_impl); \
+	if (level > list->max_level) \
+		list->max_level = level; \
+	list->count++; \
+} \
+static inline void \
+name##_delete(struct name##_list *list, struct name##_node *node) \
+{ \
+	if (node->next[0] == SKIPLIST_POISON) \
+		return; \
+	__skiplist_delete((void *)&list->head, (void *)node, \
+			  ARRAY_SIZE(list->head.next) - 1, \
+			  __skiplist_##name##_cmp_impl); \
+	__##name##_delete(list); \
+}
+
+
+typedef int (*__skiplist_cmp_t)(void *head, void *n1, void *n2);
+
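+/*
+ * Wrap the caller-provided cmp function and break ties by node address,
+ * so the ordering is total even for equal keys and insert/delete walk
+ * the same path for a given node.
+ */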
+#define __skiplist_cmp(cmp, head, cur, node)			\
+	({							\
+		int cmp_val = cmp(head, cur, node);		\
+		if (!cmp_val)					\
+			cmp_val = (unsigned long)(cur) -	\
+				  (unsigned long)(node);	\
+		cmp_val;					\
+	})
+
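+/* Unlink the first element: drop it from level 0 and from every higher
+ * head level that still points at it. Levels are nested, so we can stop
+ * at the first level that does not.
+ */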
+static inline void *
+__skiplist_dequeue(void **list, int max_level)
+{
+	void **node = list[0];
+	unsigned int i;
+
+	if (!node)
+		return NULL;
+
+	list[0] = node[0];
+	for (i = 1; i <= max_level; i++) {
+		if (list[i] != node)
+			break;
+
+		list[i] = node[i];
+	}
+	node[0] = SKIPLIST_POISON;
+
+	return node;
+}
+
+static inline void
+__skiplist_insert(void **list, void **node, int level, __skiplist_cmp_t cmp)
+{
+	void **head = list;
+
+	if (WARN(node[0] != SKIPLIST_POISON, "Insert on already inserted or uninitialized node"))
+		return;
+	for (; level >= 0; level--) {
+		while (list[level] &&
+		       __skiplist_cmp(cmp, head, list[level], node) < 0)
+			list = list[level];
+
+		node[level] = list[level];
+		list[level] = node;
+	}
+}
+
+
+static inline void
+__skiplist_delete(void **list, void **node, int max_level, __skiplist_cmp_t cmp)
+{
+	void *head = list;
+	int i;
+
+	for (i = max_level; i >= 0; i--) {
+		while (list[i] && list[i] != node &&
+		       __skiplist_cmp(cmp, head, list[i], node) <= 0)
+			list = list[i];
+
+		if (list[i] != node)
+			continue;
+
+		list[i] = node[i];
+	}
+	node[0] = SKIPLIST_POISON;
+}
+
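+/*
+ * Pick a random insertion level with geometric distribution: each extra
+ * level requires one more zero bit in the seed, i.e. P(level >= n) is
+ * 2^-n, capped at max_level.
+ */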
+static inline unsigned int
+__skiplist_level(unsigned int max_level, unsigned int count, unsigned int seed)
+{
+	unsigned int level = 0;
+
+	if (max_level >= 16 && !(seed & GENMASK(15, 0))) {
+		level += 16;
+		seed >>= 16;
+	}
+
+	if (max_level >= 8 && !(seed & GENMASK(7, 0))) {
+		level += 8;
+		seed >>= 8;
+	}
+
+	if (max_level >= 4 && !(seed & GENMASK(3, 0))) {
+		level += 4;
+		seed >>= 4;
+	}
+
+	if (!(seed & GENMASK(1, 0))) {
+		level += 2;
+		seed >>= 2;
+	}
+
+	if (!(seed & BIT(0)))
+		level++;
+
+	return min(level, max_level);
+}
+
+#endif
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -1563,7 +1563,6 @@ static void sta_apply_airtime_params(str
 	for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
 		struct airtime_sched_info *air_sched = &local->airtime[ac];
 		struct airtime_info *air_info = &sta->airtime[ac];
-		struct txq_info *txqi;
 		u8 tid;
 
 		spin_lock_bh(&air_sched->lock);
@@ -1575,10 +1574,6 @@ static void sta_apply_airtime_params(str
 
 		airtime_weight_set(air_info, params->airtime_weight);
 
-		txqi = to_txq_info(sta->sta.txq[tid]);
-		if (RB_EMPTY_NODE(&txqi->schedule_order))
-			continue;
-
 		ieee80211_update_airtime_weight(local, air_sched,
 						0, true);
 	}
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -25,7 +25,8 @@
 #include <linux/leds.h>
 #include <linux/idr.h>
 #include <linux/rhashtable.h>
-#include <linux/rbtree.h>
+#include <linux/prandom.h>
+#include <linux/skiplist.h>
 #include <net/ieee80211_radiotap.h>
 #include <net/cfg80211.h>
 #include <net/mac80211.h>
@@ -854,6 +855,7 @@ enum txq_info_flags {
 	IEEE80211_TXQ_AMPDU,
 	IEEE80211_TXQ_NO_AMSDU,
 	IEEE80211_TXQ_STOP_NETIF_TX,
+	IEEE80211_TXQ_FORCE_ACTIVE,
 };
 
 /**
@@ -870,7 +872,6 @@ struct txq_info {
 	struct fq_tin tin;
 	struct codel_vars def_cvars;
 	struct codel_stats cstats;
-	struct rb_node schedule_order;
 
 	struct sk_buff_head frags;
 	unsigned long flags;
@@ -1185,8 +1186,7 @@ enum mac80211_scan_state {
  *
  * @lock: spinlock that protects all the fields in this struct
  * @active_txqs: rbtree of currently backlogged queues, sorted by virtual time
- * @schedule_pos: the current position maintained while a driver walks the tree
- *	with ieee80211_next_txq()
+ * @schedule_pos: last used airtime_info node while a driver walks the list
  * @active_list: list of struct airtime_info structs that were active within
  *	the last AIRTIME_ACTIVE_DURATION (100 ms), used to compute
  *	weight_sum
@@ -1207,8 +1207,8 @@ enum mac80211_scan_state {
  */
 struct airtime_sched_info {
 	spinlock_t lock;
-	struct rb_root_cached active_txqs;
-	struct rb_node *schedule_pos;
+	struct airtime_sched_list active_txqs;
+	struct airtime_sched_node *schedule_pos;
 	struct list_head active_list;
 	u64 last_weight_update;
 	u64 last_schedule_activity;
@@ -1663,6 +1663,20 @@ static inline struct airtime_info *to_ai
 	return &sdata->airtime[txq->ac];
 }
 
+static inline int
+airtime_sched_cmp(struct airtime_sched_list *list,
+		  struct airtime_sched_node *n1, struct airtime_sched_node *n2)
+{
+	struct airtime_info *a1, *a2;
+
+	a1 = container_of(n1, struct airtime_info, schedule_order);
+	a2 = container_of(n2, struct airtime_info, schedule_order);
+
+	return a1->v_t_cur - a2->v_t_cur;
+}
+
+DECLARE_SKIPLIST_IMPL(airtime_sched, airtime_sched_cmp);
+
 /* To avoid divisions in the fast path, we keep pre-computed reciprocals for
  * airtime weight calculations. There are two different weights to keep track
  * of: The per-station weight and the sum of weights per phy.
@@ -1750,6 +1764,7 @@ static inline void init_airtime_info(str
 	air_info->aql_limit_high = air_sched->aql_txq_limit_high;
 	airtime_weight_set(air_info, IEEE80211_DEFAULT_AIRTIME_WEIGHT);
 	INIT_LIST_HEAD(&air_info->list);
+	airtime_sched_node_init(&air_info->schedule_order);
 }
 
 static inline int ieee80211_bssid_match(const u8 *raddr, const u8 *addr)
--- a/net/mac80211/main.c
+++ b/net/mac80211/main.c
@@ -709,7 +709,7 @@ struct ieee80211_hw *ieee80211_alloc_hw_
 	for (i = 0; i < IEEE80211_NUM_ACS; i++) {
 		struct airtime_sched_info *air_sched = &local->airtime[i];
 
-		air_sched->active_txqs = RB_ROOT_CACHED;
+		airtime_sched_list_init(&air_sched->active_txqs);
 		INIT_LIST_HEAD(&air_sched->active_list);
 		spin_lock_init(&air_sched->lock);
 		air_sched->aql_txq_limit_low = IEEE80211_DEFAULT_AQL_TXQ_LIMIT_L;
--- a/net/mac80211/sta_info.c
+++ b/net/mac80211/sta_info.c
@@ -1902,8 +1902,7 @@ void ieee80211_register_airtime(struct i
 	air_sched = &local->airtime[txq->ac];
 	air_info = to_airtime_info(txq);
 
-	if (local->airtime_flags & AIRTIME_USE_TX)
-		airtime += tx_airtime;
+	airtime += tx_airtime;
 	if (local->airtime_flags & AIRTIME_USE_RX)
 		airtime += rx_airtime;
 
--- a/net/mac80211/sta_info.h
+++ b/net/mac80211/sta_info.h
@@ -135,11 +135,14 @@ enum ieee80211_agg_stop_reason {
 #define AIRTIME_USE_TX		BIT(0)
 #define AIRTIME_USE_RX		BIT(1)
 
+DECLARE_SKIPLIST_TYPE(airtime_sched, 5);
 
 struct airtime_info {
+	struct airtime_sched_node schedule_order;
+	struct ieee80211_txq *txq[3];
 	u64 rx_airtime;
 	u64 tx_airtime;
-	u64 v_t;
+	u64 v_t, v_t_cur;
 	u64 last_scheduled;
 	struct list_head list;
 	atomic_t aql_tx_pending; /* Estimated airtime for frames pending */
@@ -147,6 +150,7 @@ struct airtime_info {
 	u32 aql_limit_high;
 	u32 weight_reciprocal;
 	u16 weight;
+	u8 txq_idx;
 };
 
 void ieee80211_sta_update_pending_airtime(struct ieee80211_local *local,
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -19,6 +19,7 @@
 #include <linux/rcupdate.h>
 #include <linux/export.h>
 #include <linux/timekeeping.h>
+#include <linux/prandom.h>
 #include <net/net_namespace.h>
 #include <net/ieee80211_radiotap.h>
 #include <net/cfg80211.h>
@@ -1476,11 +1477,12 @@ void ieee80211_txq_init(struct ieee80211
 			struct sta_info *sta,
 			struct txq_info *txqi, int tid)
 {
+	struct airtime_info *air_info;
+
 	fq_tin_init(&txqi->tin);
 	codel_vars_init(&txqi->def_cvars);
 	codel_stats_init(&txqi->cstats);
 	__skb_queue_head_init(&txqi->frags);
-	RB_CLEAR_NODE(&txqi->schedule_order);
 
 	txqi->txq.vif = &sdata->vif;
 
@@ -1489,7 +1491,7 @@ void ieee80211_txq_init(struct ieee80211
 		txqi->txq.tid = 0;
 		txqi->txq.ac = IEEE80211_AC_BE;
 
-		return;
+		goto out;
 	}
 
 	if (tid == IEEE80211_NUM_TIDS) {
@@ -1511,6 +1513,12 @@ void ieee80211_txq_init(struct ieee80211
 	txqi->txq.sta = &sta->sta;
 	txqi->txq.tid = tid;
 	sta->sta.txq[tid] = &txqi->txq;
+
+out:
+	air_info = to_airtime_info(&txqi->txq);
+	air_info->txq[air_info->txq_idx++] = &txqi->txq;
+	if (air_info->txq_idx == ARRAY_SIZE(air_info->txq))
+		air_info->txq_idx--;
 }
 
 void ieee80211_txq_purge(struct ieee80211_local *local,
@@ -3633,6 +3641,8 @@ struct sk_buff *ieee80211_tx_dequeue(str
 	struct ieee80211_tx_data tx;
 	ieee80211_tx_result r;
 	struct ieee80211_vif *vif = txq->vif;
+	u32 airtime;
+	bool ampdu;
 
 	WARN_ON_ONCE(softirq_count() == 0);
 
@@ -3791,21 +3801,26 @@ begin:
 encap_out:
 	IEEE80211_SKB_CB(skb)->control.vif = vif;
 
-	if (vif &&
-	    wiphy_ext_feature_isset(local->hw.wiphy, NL80211_EXT_FEATURE_AQL)) {
-		bool ampdu = txq->ac != IEEE80211_AC_VO;
-		u32 airtime;
-
-		airtime = ieee80211_calc_expected_tx_airtime(hw, vif, txq->sta,
-							     skb->len, ampdu);
-		if (airtime) {
-			airtime = ieee80211_info_set_tx_time_est(info, airtime);
-			ieee80211_sta_update_pending_airtime(local, tx.sta,
-							     txq->ac,
-							     airtime,
-							     false);
-		}
-	}
+	if (!vif)
+		return skb;
+
+	ampdu = txq->ac != IEEE80211_AC_VO;
+	airtime = ieee80211_calc_expected_tx_airtime(hw, vif, txq->sta,
+						     skb->len, ampdu);
+	if (!airtime)
+		return skb;
+
+	if (!wiphy_ext_feature_isset(local->hw.wiphy, NL80211_EXT_FEATURE_AQL) ||
+	    !wiphy_ext_feature_isset(local->hw.wiphy,
+				     NL80211_EXT_FEATURE_AIRTIME_FAIRNESS))
+		ieee80211_register_airtime(txq, airtime, 0);
+
+	if (!wiphy_ext_feature_isset(local->hw.wiphy, NL80211_EXT_FEATURE_AQL))
+		return skb;
+
+	airtime = ieee80211_info_set_tx_time_est(info, airtime);
+	ieee80211_sta_update_pending_airtime(local, tx.sta, txq->ac,
+					     airtime, false);
 
 	return skb;
 
@@ -3816,85 +3831,95 @@ out:
 }
 EXPORT_SYMBOL(ieee80211_tx_dequeue);
 
+static void
+airtime_info_next_txq_idx(struct airtime_info *air_info)
+{
+	air_info->txq_idx++;
+	if (air_info->txq_idx >= ARRAY_SIZE(air_info->txq) ||
+	    !air_info->txq[air_info->txq_idx])
+		air_info->txq_idx = 0;
+}
+
 struct ieee80211_txq *ieee80211_next_txq(struct ieee80211_hw *hw, u8 ac)
 {
 	struct ieee80211_local *local = hw_to_local(hw);
 	struct airtime_sched_info *air_sched;
 	u64 now = ktime_get_coarse_boottime_ns();
-	struct ieee80211_txq *ret = NULL;
+	struct airtime_sched_node *node = NULL;
+	struct ieee80211_txq *txq;
 	struct airtime_info *air_info;
 	struct txq_info *txqi = NULL;
-	struct rb_node *node;
-	bool first = false;
+	u8 txq_idx;
 
 	air_sched = &local->airtime[ac];
 	spin_lock_bh(&air_sched->lock);
 
-	node = air_sched->schedule_pos;
-
 begin:
-	if (!node) {
-		node = rb_first_cached(&air_sched->active_txqs);
-		first = true;
-	} else {
-		node = rb_next(node);
-	}
+	txq = NULL;
+	if (airtime_sched_peek(&air_sched->active_txqs) ==
+	    air_sched->schedule_pos)
+		goto out;
 
+	node = airtime_sched_dequeue(&air_sched->active_txqs);
 	if (!node)
 		goto out;
 
-	txqi = container_of(node, struct txq_info, schedule_order);
-	air_info = to_airtime_info(&txqi->txq);
+	air_info = container_of(node, struct airtime_info, schedule_order);
 
-	if (air_info->v_t > air_sched->v_t &&
-	    (!first || !airtime_catchup_v_t(air_sched, air_info->v_t, now)))
-		goto out;
-
-	if (!ieee80211_txq_airtime_check(hw, &txqi->txq)) {
-		first = false;
+	airtime_info_next_txq_idx(air_info);
+	txq_idx = air_info->txq_idx;
+	txq = air_info->txq[txq_idx];
+	if (!txq || !ieee80211_txq_airtime_check(hw, txq))
 		goto begin;
+
+	while (1) {
+		txqi = to_txq_info(txq);
+		if (test_and_clear_bit(IEEE80211_TXQ_FORCE_ACTIVE, &txqi->flags))
+			break;
+
+		if (txq_has_queue(txq))
+			break;
+
+		airtime_info_next_txq_idx(air_info);
+		txq = air_info->txq[air_info->txq_idx];
+		if (txq_idx == air_info->txq_idx)
+			goto begin;
+	}
+
+	if (air_info->v_t_cur > air_sched->v_t) {
+		if (node == airtime_sched_peek(&air_sched->active_txqs))
+			airtime_catchup_v_t(air_sched, air_info->v_t_cur, now);
 	}
 
 	air_sched->schedule_pos = node;
 	air_sched->last_schedule_activity = now;
-	ret = &txqi->txq;
 out:
 	spin_unlock_bh(&air_sched->lock);
-	return ret;
+	return txq;
 }
 EXPORT_SYMBOL(ieee80211_next_txq);
 
-static void __ieee80211_insert_txq(struct rb_root_cached *root,
+static void __ieee80211_insert_txq(struct ieee80211_local *local,
+				   struct airtime_sched_info *air_sched,
 				   struct txq_info *txqi)
 {
-	struct rb_node **new = &root->rb_root.rb_node;
-	struct airtime_info *old_air, *new_air;
-	struct rb_node *parent = NULL;
-	struct txq_info *__txqi;
-	bool leftmost = true;
-
-	while (*new) {
-		parent = *new;
-		__txqi = rb_entry(parent, struct txq_info, schedule_order);
-		old_air = to_airtime_info(&__txqi->txq);
-		new_air = to_airtime_info(&txqi->txq);
+	struct airtime_info *air_info = to_airtime_info(&txqi->txq);
+	u32 aql_time = 0;
 
-		if (new_air->v_t <= old_air->v_t) {
-			new = &parent->rb_left;
-		} else {
-			new = &parent->rb_right;
-			leftmost = false;
-		}
+	if (wiphy_ext_feature_isset(local->hw.wiphy, NL80211_EXT_FEATURE_AQL)) {
+		aql_time = atomic_read(&air_info->aql_tx_pending);
+		aql_time *= air_info->weight_reciprocal;
+		aql_time >>= IEEE80211_RECIPROCAL_SHIFT_STA - IEEE80211_WEIGHT_SHIFT;
 	}
 
-	rb_link_node(&txqi->schedule_order, parent, new);
-	rb_insert_color_cached(&txqi->schedule_order, root, leftmost);
+	airtime_sched_delete(&air_sched->active_txqs, &air_info->schedule_order);
+	air_info->v_t_cur = air_info->v_t + aql_time;
+	airtime_sched_insert(&air_sched->active_txqs, &air_info->schedule_order);
 }
 
 void ieee80211_resort_txq(struct ieee80211_hw *hw,
 			  struct ieee80211_txq *txq)
 {
-	struct airtime_info *air_info = to_airtime_info(txq);
 	struct ieee80211_local *local = hw_to_local(hw);
 	struct txq_info *txqi = to_txq_info(txq);
 	struct airtime_sched_info *air_sched;
@@ -3902,41 +3927,7 @@ void ieee80211_resort_txq(struct ieee802
 	air_sched = &local->airtime[txq->ac];
 
 	lockdep_assert_held(&air_sched->lock);
-
-	if (!RB_EMPTY_NODE(&txqi->schedule_order)) {
-		struct airtime_info *a_prev = NULL, *a_next = NULL;
-		struct txq_info *t_prev, *t_next;
-		struct rb_node *n_prev, *n_next;
-
-		/* Erasing a node can cause an expensive rebalancing operation,
-		 * so we check the previous and next nodes first and only remove
-		 * and re-insert if the current node is not already in the
-		 * correct position.
-		 */
-		if ((n_prev = rb_prev(&txqi->schedule_order)) != NULL) {
-			t_prev = container_of(n_prev, struct txq_info,
-					      schedule_order);
-			a_prev = to_airtime_info(&t_prev->txq);
-		}
-
-		if ((n_next = rb_next(&txqi->schedule_order)) != NULL) {
-			t_next = container_of(n_next, struct txq_info,
-					      schedule_order);
-			a_next = to_airtime_info(&t_next->txq);
-		}
-
-		if ((!a_prev || a_prev->v_t <= air_info->v_t) &&
-		    (!a_next || a_next->v_t > air_info->v_t))
-			return;
-
-		if (air_sched->schedule_pos == &txqi->schedule_order)
-			air_sched->schedule_pos = n_prev;
-
-		rb_erase_cached(&txqi->schedule_order,
-				&air_sched->active_txqs);
-		RB_CLEAR_NODE(&txqi->schedule_order);
-		__ieee80211_insert_txq(&air_sched->active_txqs, txqi);
-	}
+	__ieee80211_insert_txq(local, air_sched, txqi);
 }
 
 void ieee80211_update_airtime_weight(struct ieee80211_local *local,
@@ -3985,7 +3976,7 @@ void ieee80211_schedule_txq(struct ieee8
 	was_active = airtime_is_active(air_info, now);
 	airtime_set_active(air_sched, air_info, now);
 
-	if (!RB_EMPTY_NODE(&txqi->schedule_order))
+	if (airtime_sched_is_queued(&air_info->schedule_order))
 		goto out;
 
 	/* If the station has been inactive for a while, catch up its v_t so it
@@ -3997,7 +3988,7 @@ void ieee80211_schedule_txq(struct ieee8
 		air_info->v_t = air_sched->v_t;
 
 	ieee80211_update_airtime_weight(local, air_sched, now, !was_active);
-	__ieee80211_insert_txq(&air_sched->active_txqs, txqi);
+	__ieee80211_insert_txq(local, air_sched, txqi);
 
 out:
 	spin_unlock_bh(&air_sched->lock);
@@ -4023,19 +4014,10 @@ static void __ieee80211_unschedule_txq(s
 		ieee80211_update_airtime_weight(local, air_sched, 0, true);
 	}
 
-	if (RB_EMPTY_NODE(&txqi->schedule_order))
-		return;
-
-	if (air_sched->schedule_pos == &txqi->schedule_order)
-		air_sched->schedule_pos = rb_prev(&txqi->schedule_order);
-
+	airtime_sched_delete(&air_sched->active_txqs, &air_info->schedule_order);
 	if (!purge)
 		airtime_set_active(air_sched, air_info,
 				   ktime_get_coarse_boottime_ns());
-
-	rb_erase_cached(&txqi->schedule_order,
-			&air_sched->active_txqs);
-	RB_CLEAR_NODE(&txqi->schedule_order);
 }
 
 void ieee80211_unschedule_txq(struct ieee80211_hw *hw,
@@ -4055,14 +4037,24 @@ void ieee80211_return_txq(struct ieee802
 {
 	struct ieee80211_local *local = hw_to_local(hw);
 	struct txq_info *txqi = to_txq_info(txq);
+	struct airtime_sched_info *air_sched;
+	struct airtime_info *air_info;
 
-	spin_lock_bh(&local->airtime[txq->ac].lock);
+	air_sched = &local->airtime[txq->ac];
+	air_info = to_airtime_info(&txqi->txq);
 
-	if (!RB_EMPTY_NODE(&txqi->schedule_order) && !force &&
-	    !txq_has_queue(txq))
-		__ieee80211_unschedule_txq(hw, txq, false);
+	if (force)
+		set_bit(IEEE80211_TXQ_FORCE_ACTIVE, &txqi->flags);
 
-	spin_unlock_bh(&local->airtime[txq->ac].lock);
+	spin_lock_bh(&air_sched->lock);
+	if (!ieee80211_txq_airtime_check(hw, &txqi->txq))
+		airtime_sched_delete(&air_sched->active_txqs,
+				     &air_info->schedule_order);
+	else if (txq_has_queue(txq) || force)
+		__ieee80211_insert_txq(local, air_sched, txqi);
+	else
+		__ieee80211_unschedule_txq(hw, txq, false);
+	spin_unlock_bh(&air_sched->lock);
 }
 EXPORT_SYMBOL(ieee80211_return_txq);
 
@@ -4101,46 +4093,48 @@ EXPORT_SYMBOL(ieee80211_txq_airtime_chec
 bool ieee80211_txq_may_transmit(struct ieee80211_hw *hw,
 				struct ieee80211_txq *txq)
 {
-	struct txq_info *first_txqi = NULL, *txqi = to_txq_info(txq);
+	struct txq_info *txqi = to_txq_info(txq);
 	struct ieee80211_local *local = hw_to_local(hw);
 	struct airtime_sched_info *air_sched;
+	struct airtime_sched_node *node = NULL;
 	struct airtime_info *air_info;
-	struct rb_node *node = NULL;
 	bool ret = false;
+	u32 aql_slack;
 	u64 now;
 
-
 	if (!ieee80211_txq_airtime_check(hw, txq))
 		return false;
 
 	air_sched = &local->airtime[txq->ac];
 	spin_lock_bh(&air_sched->lock);
 
-	if (RB_EMPTY_NODE(&txqi->schedule_order))
-		goto out;
-
 	now = ktime_get_coarse_boottime_ns();
 
 	/* Like in ieee80211_next_txq(), make sure the first station in the
 	 * scheduling order is eligible for transmission to avoid starvation.
 	 */
-	node = rb_first_cached(&air_sched->active_txqs);
+	node = airtime_sched_peek(&air_sched->active_txqs);
 	if (node) {
-		first_txqi = container_of(node, struct txq_info,
-					  schedule_order);
-		air_info = to_airtime_info(&first_txqi->txq);
+		air_info = container_of(node, struct airtime_info,
+					schedule_order);
 
 		if (air_sched->v_t < air_info->v_t)
 			airtime_catchup_v_t(air_sched, air_info->v_t, now);
 	}
 
 	air_info = to_airtime_info(&txqi->txq);
-	if (air_info->v_t <= air_sched->v_t) {
+	aql_slack = air_info->aql_limit_low;
+	aql_slack *= air_info->weight_reciprocal;
+	aql_slack >>= IEEE80211_RECIPROCAL_SHIFT_STA - IEEE80211_WEIGHT_SHIFT;
+	/*
+	 * add extra slack of aql_limit_low in order to avoid queue
+	 * starvation when bypassing normal scheduling order
+	 */
+	if (air_info->v_t <= air_sched->v_t + aql_slack) {
 		air_sched->last_schedule_activity = now;
 		ret = true;
 	}
 
-out:
 	spin_unlock_bh(&air_sched->lock);
 	return ret;
 }
@@ -4151,9 +4145,7 @@ void ieee80211_txq_schedule_start(struct
 	struct ieee80211_local *local = hw_to_local(hw);
 	struct airtime_sched_info *air_sched = &local->airtime[ac];
 
-	spin_lock_bh(&air_sched->lock);
 	air_sched->schedule_pos = NULL;
-	spin_unlock_bh(&air_sched->lock);
 }
 EXPORT_SYMBOL(ieee80211_txq_schedule_start);
 