bcm27xx: update to latest patches from RPi foundation
target/linux/bcm27xx/patches-5.4/950-0493-media-v4l2-mem2mem-support-held-capture-buffers.patch
From dc9b786e4b9a1262b536b3c9d0fa88e34a2b3f8f Mon Sep 17 00:00:00 2001
From: Hans Verkuil <hverkuil-cisco@xs4all.nl>
Date: Fri, 11 Oct 2019 06:32:41 -0300
Subject: [PATCH] media: v4l2-mem2mem: support held capture buffers

Commit f8cca8c97a63d77f48334cde81d15014f43530ef upstream.

Check for held buffers that are ready to be returned to vb2 in
__v4l2_m2m_try_queue(). This avoids drivers having to handle this
case.

Add v4l2_m2m_buf_done_and_job_finish() to correctly return source
and destination buffers and mark the job as finished while taking
a held destination buffer into account (i.e. that buffer won't be
returned). This has to be done while job_spinlock is held to avoid
race conditions.
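
For illustration only (not part of the upstream change): a stateless
decoder driver that supports held capture buffers would call the new
helper from its per-job completion path instead of pairing
v4l2_m2m_buf_done() with v4l2_m2m_job_finish(). The foo_* names below
are hypothetical; only the v4l2_m2m_*() call and the vb2 buffer states
come from the existing mem2mem/vb2 API and this patch.

  #include <media/v4l2-mem2mem.h>

  /*
   * Hypothetical driver context; real drivers keep these pointers in
   * their own device/context structures.
   */
  struct foo_ctx {
  	struct v4l2_m2m_dev *m2m_dev;
  	struct v4l2_m2m_ctx *m2m_ctx;
  };

  /* Completion path, e.g. called from the decode-done interrupt handler. */
  static void foo_device_job_done(struct foo_ctx *ctx, bool error)
  {
  	/*
  	 * Returns the source buffer, returns the capture buffer only if
  	 * userspace did not set V4L2_BUF_FLAG_M2M_HOLD_CAPTURE_BUF on the
  	 * source buffer, and finishes the job, all under job_spinlock.
  	 */
  	v4l2_m2m_buf_done_and_job_finish(ctx->m2m_dev, ctx->m2m_ctx,
  					 error ? VB2_BUF_STATE_ERROR
  					       : VB2_BUF_STATE_DONE);
  }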

Signed-off-by: Hans Verkuil <hverkuil-cisco@xs4all.nl>
Signed-off-by: Mauro Carvalho Chehab <mchehab+samsung@kernel.org>
---
 drivers/media/v4l2-core/v4l2-mem2mem.c | 130 ++++++++++++++++++-------
 include/media/v4l2-mem2mem.h           |  33 ++++++-
 2 files changed, 128 insertions(+), 35 deletions(-)

--- a/drivers/media/v4l2-core/v4l2-mem2mem.c
+++ b/drivers/media/v4l2-core/v4l2-mem2mem.c
@@ -284,7 +284,8 @@ static void v4l2_m2m_try_run(struct v4l2
 static void __v4l2_m2m_try_queue(struct v4l2_m2m_dev *m2m_dev,
 				 struct v4l2_m2m_ctx *m2m_ctx)
 {
-	unsigned long flags_job, flags_out, flags_cap;
+	unsigned long flags_job;
+	struct vb2_v4l2_buffer *dst, *src;
 
 	dprintk("Trying to schedule a job for m2m_ctx: %p\n", m2m_ctx);
 
@@ -307,20 +308,30 @@ static void __v4l2_m2m_try_queue(struct
 		goto job_unlock;
 	}
 
-	spin_lock_irqsave(&m2m_ctx->out_q_ctx.rdy_spinlock, flags_out);
-	if (list_empty(&m2m_ctx->out_q_ctx.rdy_queue)
-	    && !m2m_ctx->out_q_ctx.buffered) {
+	src = v4l2_m2m_next_src_buf(m2m_ctx);
+	dst = v4l2_m2m_next_dst_buf(m2m_ctx);
+	if (!src && !m2m_ctx->out_q_ctx.buffered) {
 		dprintk("No input buffers available\n");
-		goto out_unlock;
+		goto job_unlock;
 	}
-	spin_lock_irqsave(&m2m_ctx->cap_q_ctx.rdy_spinlock, flags_cap);
-	if (list_empty(&m2m_ctx->cap_q_ctx.rdy_queue)
-	    && !m2m_ctx->cap_q_ctx.buffered) {
+	if (!dst && !m2m_ctx->cap_q_ctx.buffered) {
 		dprintk("No output buffers available\n");
-		goto cap_unlock;
+		goto job_unlock;
+	}
+
+	if (src && dst &&
+	    dst->is_held && dst->vb2_buf.copied_timestamp &&
+	    dst->vb2_buf.timestamp != src->vb2_buf.timestamp) {
+		dst->is_held = false;
+		v4l2_m2m_dst_buf_remove(m2m_ctx);
+		v4l2_m2m_buf_done(dst, VB2_BUF_STATE_DONE);
+		dst = v4l2_m2m_next_dst_buf(m2m_ctx);
+
+		if (!dst && !m2m_ctx->cap_q_ctx.buffered) {
+			dprintk("No output buffers available after returning held buffer\n");
+			goto job_unlock;
+		}
 	}
-	spin_unlock_irqrestore(&m2m_ctx->cap_q_ctx.rdy_spinlock, flags_cap);
-	spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock, flags_out);
 
 	if (m2m_dev->m2m_ops->job_ready
 	    && (!m2m_dev->m2m_ops->job_ready(m2m_ctx->priv))) {
@@ -331,13 +342,6 @@ static void __v4l2_m2m_try_queue(struct
 	list_add_tail(&m2m_ctx->queue, &m2m_dev->job_queue);
 	m2m_ctx->job_flags |= TRANS_QUEUED;
 
-	spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
-	return;
-
-cap_unlock:
-	spin_unlock_irqrestore(&m2m_ctx->cap_q_ctx.rdy_spinlock, flags_cap);
-out_unlock:
-	spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock, flags_out);
 job_unlock:
 	spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
 }
@@ -412,37 +416,97 @@ static void v4l2_m2m_cancel_job(struct v
 	}
 }
 
-void v4l2_m2m_job_finish(struct v4l2_m2m_dev *m2m_dev,
-			 struct v4l2_m2m_ctx *m2m_ctx)
+/*
+ * Schedule the next job, called from v4l2_m2m_job_finish() or
+ * v4l2_m2m_buf_done_and_job_finish().
+ */
+static void v4l2_m2m_schedule_next_job(struct v4l2_m2m_dev *m2m_dev,
+				       struct v4l2_m2m_ctx *m2m_ctx)
 {
-	unsigned long flags;
+	/*
+	 * This instance might have more buffers ready, but since we do not
+	 * allow more than one job on the job_queue per instance, each has
+	 * to be scheduled separately after the previous one finishes.
+	 */
+	__v4l2_m2m_try_queue(m2m_dev, m2m_ctx);
 
-	spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
+	/*
+	 * We might be running in atomic context,
+	 * but the job must be run in non-atomic context.
+	 */
+	schedule_work(&m2m_dev->job_work);
+}
+
+/*
+ * Assumes job_spinlock is held, called from v4l2_m2m_job_finish() or
+ * v4l2_m2m_buf_done_and_job_finish().
+ */
+static bool _v4l2_m2m_job_finish(struct v4l2_m2m_dev *m2m_dev,
+				 struct v4l2_m2m_ctx *m2m_ctx)
+{
 	if (!m2m_dev->curr_ctx || m2m_dev->curr_ctx != m2m_ctx) {
-		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
 		dprintk("Called by an instance not currently running\n");
-		return;
+		return false;
 	}
 
 	list_del(&m2m_dev->curr_ctx->queue);
 	m2m_dev->curr_ctx->job_flags &= ~(TRANS_QUEUED | TRANS_RUNNING);
 	wake_up(&m2m_dev->curr_ctx->finished);
 	m2m_dev->curr_ctx = NULL;
+	return true;
+}
 
-	spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
-
-	/* This instance might have more buffers ready, but since we do not
-	 * allow more than one job on the job_queue per instance, each has
-	 * to be scheduled separately after the previous one finishes. */
-	__v4l2_m2m_try_queue(m2m_dev, m2m_ctx);
+void v4l2_m2m_job_finish(struct v4l2_m2m_dev *m2m_dev,
+			 struct v4l2_m2m_ctx *m2m_ctx)
+{
+	unsigned long flags;
+	bool schedule_next;
 
-	/* We might be running in atomic context,
-	 * but the job must be run in non-atomic context.
+	/*
+	 * This function should not be used for drivers that support
+	 * holding capture buffers. Those should use
+	 * v4l2_m2m_buf_done_and_job_finish() instead.
 	 */
-	schedule_work(&m2m_dev->job_work);
+	WARN_ON(m2m_ctx->cap_q_ctx.q.subsystem_flags &
+		VB2_V4L2_FL_SUPPORTS_M2M_HOLD_CAPTURE_BUF);
+	spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
+	schedule_next = _v4l2_m2m_job_finish(m2m_dev, m2m_ctx);
+	spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
+
+	if (schedule_next)
+		v4l2_m2m_schedule_next_job(m2m_dev, m2m_ctx);
 }
 EXPORT_SYMBOL(v4l2_m2m_job_finish);
 
+void v4l2_m2m_buf_done_and_job_finish(struct v4l2_m2m_dev *m2m_dev,
+				      struct v4l2_m2m_ctx *m2m_ctx,
+				      enum vb2_buffer_state state)
+{
+	struct vb2_v4l2_buffer *src_buf, *dst_buf;
+	bool schedule_next = false;
+	unsigned long flags;
+
+	spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
+	src_buf = v4l2_m2m_src_buf_remove(m2m_ctx);
+	dst_buf = v4l2_m2m_next_dst_buf(m2m_ctx);
+
+	if (WARN_ON(!src_buf || !dst_buf))
+		goto unlock;
+	v4l2_m2m_buf_done(src_buf, state);
+	dst_buf->is_held = src_buf->flags & V4L2_BUF_FLAG_M2M_HOLD_CAPTURE_BUF;
+	if (!dst_buf->is_held) {
+		v4l2_m2m_dst_buf_remove(m2m_ctx);
+		v4l2_m2m_buf_done(dst_buf, state);
+	}
+	schedule_next = _v4l2_m2m_job_finish(m2m_dev, m2m_ctx);
+unlock:
+	spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
+
+	if (schedule_next)
+		v4l2_m2m_schedule_next_job(m2m_dev, m2m_ctx);
+}
+EXPORT_SYMBOL(v4l2_m2m_buf_done_and_job_finish);
+
 int v4l2_m2m_reqbufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
 		     struct v4l2_requestbuffers *reqbufs)
 {
--- a/include/media/v4l2-mem2mem.h
+++ b/include/media/v4l2-mem2mem.h
@@ -21,7 +21,8 @@
  *		callback.
  *		The job does NOT have to end before this callback returns
  *		(and it will be the usual case). When the job finishes,
- *		v4l2_m2m_job_finish() has to be called.
+ *		v4l2_m2m_job_finish() or v4l2_m2m_buf_done_and_job_finish()
+ *		has to be called.
  * @job_ready:	optional. Should return 0 if the driver does not have a job
  *		fully prepared to run yet (i.e. it will not be able to finish a
  *		transaction without sleeping). If not provided, it will be
@@ -33,7 +34,8 @@
  *		stop the device safely; e.g. in the next interrupt handler),
  *		even if the transaction would not have been finished by then.
  *		After the driver performs the necessary steps, it has to call
- *		v4l2_m2m_job_finish() (as if the transaction ended normally).
+ *		v4l2_m2m_job_finish() or v4l2_m2m_buf_done_and_job_finish() as
+ *		if the transaction ended normally.
  *		This function does not have to (and will usually not) wait
  *		until the device enters a state when it can be stopped.
  */
@@ -173,6 +175,33 @@ void v4l2_m2m_try_schedule(struct v4l2_m
 void v4l2_m2m_job_finish(struct v4l2_m2m_dev *m2m_dev,
 			 struct v4l2_m2m_ctx *m2m_ctx);
 
+/**
+ * v4l2_m2m_buf_done_and_job_finish() - return source/destination buffers with
+ * state and inform the framework that a job has been finished and have it
+ * clean up
+ *
+ * @m2m_dev: opaque pointer to the internal data to handle M2M context
+ * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
+ * @state: vb2 buffer state passed to v4l2_m2m_buf_done().
+ *
+ * Drivers that set V4L2_BUF_CAP_SUPPORTS_M2M_HOLD_CAPTURE_BUF must use this
+ * function instead of job_finish() to take held buffers into account. It is
+ * optional for other drivers.
+ *
+ * This function removes the source buffer from the ready list and returns
+ * it with the given state. The same is done for the destination buffer, unless
+ * it is marked 'held'. In that case the buffer is kept on the ready list.
+ *
+ * After that the job is finished (see job_finish()).
+ *
+ * This allows for multiple output buffers to be used to fill in a single
+ * capture buffer. This is typically used by stateless decoders where
+ * multiple e.g. H.264 slices contribute to a single decoded frame.
+ */
+void v4l2_m2m_buf_done_and_job_finish(struct v4l2_m2m_dev *m2m_dev,
+				      struct v4l2_m2m_ctx *m2m_ctx,
+				      enum vb2_buffer_state state);
+
 static inline void
 v4l2_m2m_buf_done(struct vb2_v4l2_buffer *buf, enum vb2_buffer_state state)
 {