bcm27xx: switch to 5.15
[openwrt/staging/noltari.git] / target / linux / bcm27xx / patches-5.10 / 950-0612-media-rpivid-Add-an-enable-count-to-irq-claim-Qs.patch
1 From 8b6aa431b277cc3c49c0c8be2b49b395d29325cf Mon Sep 17 00:00:00 2001
2 From: John Cox <jc@kynesim.co.uk>
3 Date: Thu, 11 Mar 2021 18:43:15 +0000
4 Subject: [PATCH] media: rpivid: Add an enable count to irq claim Qs
5
6 Add an enable count to the irq Q structures to allow the irq logic to
7 block further callbacks if resources associated with the irq are not
8 yet available.
9
10 Signed-off-by: John Cox <jc@kynesim.co.uk>
11 ---
12 drivers/staging/media/rpivid/rpivid.h | 2 +
13 drivers/staging/media/rpivid/rpivid_hw.c | 118 +++++++++++++++--------
14 drivers/staging/media/rpivid/rpivid_hw.h | 3 +
15 3 files changed, 85 insertions(+), 38 deletions(-)
16
17 --- a/drivers/staging/media/rpivid/rpivid.h
18 +++ b/drivers/staging/media/rpivid/rpivid.h
19 @@ -151,6 +151,8 @@ struct rpivid_hw_irq_ctrl {
20 struct rpivid_hw_irq_ent *irq;
21 /* Non-zero => do not start a new job - outer layer sched pending */
22 int no_sched;
23 + /* Enable count. -1 always OK, 0 do not sched, +ve sched & count down */
24 + int enable;
25 /* Thread CB requested */
26 bool thread_reqed;
27 };
28 --- a/drivers/staging/media/rpivid/rpivid_hw.c
29 +++ b/drivers/staging/media/rpivid/rpivid_hw.c
30 @@ -42,35 +42,62 @@ static void pre_irq(struct rpivid_dev *d
31 ient->cb = cb;
32 ient->v = v;
33
34 - // Not sure this lock is actually required
35 spin_lock_irqsave(&ictl->lock, flags);
36 ictl->irq = ient;
37 + ictl->no_sched++;
38 spin_unlock_irqrestore(&ictl->lock, flags);
39 }
40
41 -static void sched_claim(struct rpivid_dev * const dev,
42 - struct rpivid_hw_irq_ctrl * const ictl)
43 +/* Should be called from inside ictl->lock */
44 +static inline bool sched_enabled(const struct rpivid_hw_irq_ctrl * const ictl)
45 {
46 - for (;;) {
47 - struct rpivid_hw_irq_ent *ient = NULL;
48 - unsigned long flags;
49 + return ictl->no_sched <= 0 && ictl->enable;
50 +}
51
52 - spin_lock_irqsave(&ictl->lock, flags);
53 +/* Should be called from inside ictl->lock & after checking sched_enabled() */
54 +static inline void set_claimed(struct rpivid_hw_irq_ctrl * const ictl)
55 +{
56 + if (ictl->enable > 0)
57 + --ictl->enable;
58 + ictl->no_sched = 1;
59 +}
60
61 - if (--ictl->no_sched <= 0) {
62 - ient = ictl->claim;
63 - if (!ictl->irq && ient) {
64 - ictl->claim = ient->next;
65 - ictl->no_sched = 1;
66 - }
67 - }
68 +/* Should be called from inside ictl->lock */
69 +static struct rpivid_hw_irq_ent *get_sched(struct rpivid_hw_irq_ctrl * const ictl)
70 +{
71 + struct rpivid_hw_irq_ent *ient;
72
73 - spin_unlock_irqrestore(&ictl->lock, flags);
74 + if (!sched_enabled(ictl))
75 + return NULL;
76 +
77 + ient = ictl->claim;
78 + if (!ient)
79 + return NULL;
80 + ictl->claim = ient->next;
81 +
82 + set_claimed(ictl);
83 + return ient;
84 +}
85
86 - if (!ient)
87 - break;
88 +/* Run a callback & check to see if there is anything else to run */
89 +static void sched_cb(struct rpivid_dev * const dev,
90 + struct rpivid_hw_irq_ctrl * const ictl,
91 + struct rpivid_hw_irq_ent *ient)
92 +{
93 + while (ient) {
94 + unsigned long flags;
95
96 ient->cb(dev, ient->v);
97 +
98 + spin_lock_irqsave(&ictl->lock, flags);
99 +
100 + /* Always dec no_sched after cb exec - must have been set
101 + * on entry to cb
102 + */
103 + --ictl->no_sched;
104 + ient = get_sched(ictl);
105 +
106 + spin_unlock_irqrestore(&ictl->lock, flags);
107 }
108 }
109
110 @@ -84,7 +111,7 @@ static void pre_thread(struct rpivid_dev
111 ient->v = v;
112 ictl->irq = ient;
113 ictl->thread_reqed = true;
114 - ictl->no_sched++;
115 + ictl->no_sched++; /* This is unwound in do_thread */
116 }
117
118 // Called in irq context
119 @@ -96,17 +123,10 @@ static void do_irq(struct rpivid_dev * c
120
121 spin_lock_irqsave(&ictl->lock, flags);
122 ient = ictl->irq;
123 - if (ient) {
124 - ictl->no_sched++;
125 - ictl->irq = NULL;
126 - }
127 + ictl->irq = NULL;
128 spin_unlock_irqrestore(&ictl->lock, flags);
129
130 - if (ient) {
131 - ient->cb(dev, ient->v);
132 -
133 - sched_claim(dev, ictl);
134 - }
135 + sched_cb(dev, ictl, ient);
136 }
137
138 static void do_claim(struct rpivid_dev * const dev,
139 @@ -127,7 +147,7 @@ static void do_claim(struct rpivid_dev *
140 ictl->tail->next = ient;
141 ictl->tail = ient;
142 ient = NULL;
143 - } else if (ictl->no_sched || ictl->irq) {
144 + } else if (!sched_enabled(ictl)) {
145 // Empty Q but other activity in progress so Q
146 ictl->claim = ient;
147 ictl->tail = ient;
148 @@ -135,16 +155,34 @@ static void do_claim(struct rpivid_dev *
149 } else {
150 // Nothing else going on - schedule immediately and
151 // prevent anything else scheduling claims
152 - ictl->no_sched = 1;
153 + set_claimed(ictl);
154 }
155
156 spin_unlock_irqrestore(&ictl->lock, flags);
157
158 - if (ient) {
159 - ient->cb(dev, ient->v);
160 + sched_cb(dev, ictl, ient);
161 +}
162
163 - sched_claim(dev, ictl);
164 - }
165 +/* Enable n claims.
166 + * n < 0 set to unlimited (default on init)
167 + * n = 0 if previously unlimited then disable otherwise nop
168 + * n > 0 if previously unlimited then set to n enables
169 + * otherwise add n enables
170 + * The enable count is automatically decremented every time a claim is run
171 + */
172 +static void do_enable_claim(struct rpivid_dev * const dev,
173 + int n,
174 + struct rpivid_hw_irq_ctrl * const ictl)
175 +{
176 + unsigned long flags;
177 + struct rpivid_hw_irq_ent *ient;
178 +
179 + spin_lock_irqsave(&ictl->lock, flags);
180 + ictl->enable = n < 0 ? -1 : ictl->enable <= 0 ? n : ictl->enable + n;
181 + ient = get_sched(ictl);
182 + spin_unlock_irqrestore(&ictl->lock, flags);
183 +
184 + sched_cb(dev, ictl, ient);
185 }
186
187 static void ictl_init(struct rpivid_hw_irq_ctrl * const ictl)
188 @@ -154,6 +192,8 @@ static void ictl_init(struct rpivid_hw_i
189 ictl->tail = NULL;
190 ictl->irq = NULL;
191 ictl->no_sched = 0;
192 + ictl->enable = -1;
193 + ictl->thread_reqed = false;
194 }
195
196 static void ictl_uninit(struct rpivid_hw_irq_ctrl * const ictl)
197 @@ -203,11 +243,7 @@ static void do_thread(struct rpivid_dev
198
199 spin_unlock_irqrestore(&ictl->lock, flags);
200
201 - if (ient) {
202 - ient->cb(dev, ient->v);
203 -
204 - sched_claim(dev, ictl);
205 - }
206 + sched_cb(dev, ictl, ient);
207 }
208
209 static irqreturn_t rpivid_irq_thread(int irq, void *data)
210 @@ -231,6 +267,12 @@ void rpivid_hw_irq_active1_thread(struct
211 pre_thread(dev, ient, thread_cb, ctx, &dev->ic_active1);
212 }
213
214 +void rpivid_hw_irq_active1_enable_claim(struct rpivid_dev *dev,
215 + int n)
216 +{
217 + do_enable_claim(dev, n, &dev->ic_active1);
218 +}
219 +
220 void rpivid_hw_irq_active1_claim(struct rpivid_dev *dev,
221 struct rpivid_hw_irq_ent *ient,
222 rpivid_irq_callback ready_cb, void *ctx)
223 --- a/drivers/staging/media/rpivid/rpivid_hw.h
224 +++ b/drivers/staging/media/rpivid/rpivid_hw.h
225 @@ -272,6 +272,9 @@ static inline void apb_write_vc_len(cons
226 ARG_IC_ICTRL_ACTIVE1_INT_SET |\
227 ARG_IC_ICTRL_ACTIVE2_INT_SET)
228
229 +/* Regulate claim Q */
230 +void rpivid_hw_irq_active1_enable_claim(struct rpivid_dev *dev,
231 + int n);
232 /* Auto release once all CBs called */
233 void rpivid_hw_irq_active1_claim(struct rpivid_dev *dev,
234 struct rpivid_hw_irq_ent *ient,