bcm27xx: add support for linux v5.15
[openwrt/staging/chunkeey.git] target/linux/bcm27xx/patches-5.15/950-0909-drm-vc4-Warn-if-some-v3d-code-is-run-on-BCM2711.patch
From cec5fc1572ae50252a492ceeabe1b896f8d521b2 Mon Sep 17 00:00:00 2001
From: Maxime Ripard <maxime@cerno.tech>
Date: Thu, 21 Apr 2022 16:29:43 +0200
Subject: [PATCH] drm/vc4: Warn if some v3d code is run on BCM2711

The BCM2711 has a separate driver for the v3d, and thus we can't call
into any of the driver entrypoints that rely on the v3d being there.

Let's add a bunch of checks and complain loudly if that ever happens.

Signed-off-by: Maxime Ripard <maxime@cerno.tech>
---
drivers/gpu/drm/vc4/vc4_bo.c | 49 ++++++++++++++++++++++
drivers/gpu/drm/vc4/vc4_drv.c | 11 +++++
drivers/gpu/drm/vc4/vc4_drv.h | 6 +++
drivers/gpu/drm/vc4/vc4_gem.c | 40 ++++++++++++++++++
drivers/gpu/drm/vc4/vc4_irq.c | 16 +++++++
drivers/gpu/drm/vc4/vc4_kms.c | 4 ++
drivers/gpu/drm/vc4/vc4_perfmon.c | 47 ++++++++++++++++++++-
drivers/gpu/drm/vc4/vc4_render_cl.c | 4 ++
drivers/gpu/drm/vc4/vc4_v3d.c | 15 +++++++
drivers/gpu/drm/vc4/vc4_validate.c | 16 +++++++
drivers/gpu/drm/vc4/vc4_validate_shaders.c | 4 ++
11 files changed, 211 insertions(+), 1 deletion(-)

--- a/drivers/gpu/drm/vc4/vc4_bo.c
+++ b/drivers/gpu/drm/vc4/vc4_bo.c
@@ -248,6 +248,9 @@ void vc4_bo_add_to_purgeable_pool(struct
{
struct vc4_dev *vc4 = to_vc4_dev(bo->base.base.dev);

+ if (WARN_ON_ONCE(vc4->is_vc5))
+ return;
+
mutex_lock(&vc4->purgeable.lock);
list_add_tail(&bo->size_head, &vc4->purgeable.list);
vc4->purgeable.num++;
@@ -259,6 +262,9 @@ static void vc4_bo_remove_from_purgeable
{
struct vc4_dev *vc4 = to_vc4_dev(bo->base.base.dev);

+ if (WARN_ON_ONCE(vc4->is_vc5))
+ return;
+
/* list_del_init() is used here because the caller might release
* the purgeable lock in order to acquire the madv one and update the
* madv status.
@@ -389,6 +395,9 @@ struct drm_gem_object *vc4_create_object
struct vc4_dev *vc4 = to_vc4_dev(dev);
struct vc4_bo *bo;

+ if (WARN_ON_ONCE(vc4->is_vc5))
+ return ERR_PTR(-ENODEV);
+
bo = kzalloc(sizeof(*bo), GFP_KERNEL);
if (!bo)
return NULL;
@@ -415,6 +424,9 @@ struct vc4_bo *vc4_bo_create(struct drm_
struct drm_gem_cma_object *cma_obj;
struct vc4_bo *bo;

+ if (WARN_ON_ONCE(vc4->is_vc5))
+ return ERR_PTR(-ENODEV);
+
if (size == 0)
return ERR_PTR(-EINVAL);

@@ -477,9 +489,13 @@ int vc4_bo_dumb_create(struct drm_file *
struct drm_device *dev,
struct drm_mode_create_dumb *args)
{
+ struct vc4_dev *vc4 = to_vc4_dev(dev);
struct vc4_bo *bo = NULL;
int ret;

+ if (WARN_ON_ONCE(vc4->is_vc5))
+ return -ENODEV;
+
ret = vc4_dumb_fixup_args(args);
if (ret)
return ret;
@@ -600,8 +616,12 @@ static void vc4_bo_cache_time_work(struc

int vc4_bo_inc_usecnt(struct vc4_bo *bo)
{
+ struct vc4_dev *vc4 = to_vc4_dev(bo->base.base.dev);
int ret;

+ if (WARN_ON_ONCE(vc4->is_vc5))
+ return -ENODEV;
+
/* Fast path: if the BO is already retained by someone, no need to
* check the madv status.
*/
@@ -636,6 +656,11 @@ int vc4_bo_inc_usecnt(struct vc4_bo *bo)

void vc4_bo_dec_usecnt(struct vc4_bo *bo)
{
+ struct vc4_dev *vc4 = to_vc4_dev(bo->base.base.dev);
+
+ if (WARN_ON_ONCE(vc4->is_vc5))
+ return;
+
/* Fast path: if the BO is still retained by someone, no need to test
* the madv value.
*/
@@ -761,6 +786,9 @@ int vc4_create_bo_ioctl(struct drm_devic
struct vc4_bo *bo = NULL;
int ret;

+ if (WARN_ON_ONCE(vc4->is_vc5))
+ return -ENODEV;
+
ret = vc4_grab_bin_bo(vc4, vc4file);
if (ret)
return ret;
@@ -784,9 +812,13 @@ int vc4_create_bo_ioctl(struct drm_devic
int vc4_mmap_bo_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
+ struct vc4_dev *vc4 = to_vc4_dev(dev);
struct drm_vc4_mmap_bo *args = data;
struct drm_gem_object *gem_obj;

+ if (WARN_ON_ONCE(vc4->is_vc5))
+ return -ENODEV;
+
gem_obj = drm_gem_object_lookup(file_priv, args->handle);
if (!gem_obj) {
DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle);
@@ -810,6 +842,9 @@ vc4_create_shader_bo_ioctl(struct drm_de
struct vc4_bo *bo = NULL;
int ret;

+ if (WARN_ON_ONCE(vc4->is_vc5))
+ return -ENODEV;
+
if (args->size == 0)
return -EINVAL;

@@ -880,11 +915,15 @@ fail:
int vc4_set_tiling_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
+ struct vc4_dev *vc4 = to_vc4_dev(dev);
struct drm_vc4_set_tiling *args = data;
struct drm_gem_object *gem_obj;
struct vc4_bo *bo;
bool t_format;

+ if (WARN_ON_ONCE(vc4->is_vc5))
+ return -ENODEV;
+
if (args->flags != 0)
return -EINVAL;

@@ -923,10 +962,14 @@ int vc4_set_tiling_ioctl(struct drm_devi
int vc4_get_tiling_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
+ struct vc4_dev *vc4 = to_vc4_dev(dev);
struct drm_vc4_get_tiling *args = data;
struct drm_gem_object *gem_obj;
struct vc4_bo *bo;

+ if (WARN_ON_ONCE(vc4->is_vc5))
+ return -ENODEV;
+
if (args->flags != 0 || args->modifier != 0)
return -EINVAL;

@@ -953,6 +996,9 @@ int vc4_bo_cache_init(struct drm_device
struct vc4_dev *vc4 = to_vc4_dev(dev);
int i;

+ if (WARN_ON_ONCE(vc4->is_vc5))
+ return -ENODEV;
+
/* Create the initial set of BO labels that the kernel will
* use. This lets us avoid a bunch of string reallocation in
* the kernel's draw and BO allocation paths.
@@ -1012,6 +1058,9 @@ int vc4_label_bo_ioctl(struct drm_device
struct drm_gem_object *gem_obj;
int ret = 0, label;

+ if (WARN_ON_ONCE(vc4->is_vc5))
+ return -ENODEV;
+
if (!args->len)
return -EINVAL;

--- a/drivers/gpu/drm/vc4/vc4_drv.c
+++ b/drivers/gpu/drm/vc4/vc4_drv.c
@@ -101,6 +101,9 @@ static int vc4_get_param_ioctl(struct dr
if (args->pad != 0)
return -EINVAL;

+ if (WARN_ON_ONCE(vc4->is_vc5))
+ return -ENODEV;
+
if (!vc4->v3d)
return -ENODEV;

@@ -144,11 +147,16 @@ static int vc4_get_param_ioctl(struct dr

static int vc4_open(struct drm_device *dev, struct drm_file *file)
{
+ struct vc4_dev *vc4 = to_vc4_dev(dev);
struct vc4_file *vc4file;

+ if (WARN_ON_ONCE(vc4->is_vc5))
+ return -ENODEV;
+
vc4file = kzalloc(sizeof(*vc4file), GFP_KERNEL);
if (!vc4file)
return -ENOMEM;
+ vc4file->dev = vc4;

vc4_perfmon_open_file(vc4file);
file->driver_priv = vc4file;
@@ -160,6 +168,9 @@ static void vc4_close(struct drm_device
struct vc4_dev *vc4 = to_vc4_dev(dev);
struct vc4_file *vc4file = file->driver_priv;

+ if (WARN_ON_ONCE(vc4->is_vc5))
+ return;
+
if (vc4file->bin_bo_used)
vc4_v3d_bin_bo_put(vc4);

--- a/drivers/gpu/drm/vc4/vc4_drv.h
+++ b/drivers/gpu/drm/vc4/vc4_drv.h
@@ -49,6 +49,8 @@ enum vc4_kernel_bo_type {
* done. This way, only events related to a specific job will be counted.
*/
struct vc4_perfmon {
+ struct vc4_dev *dev;
+
/* Tracks the number of users of the perfmon, when this counter reaches
* zero the perfmon is destroyed.
*/
@@ -612,6 +614,8 @@ to_vc4_crtc_state(struct drm_crtc_state
#define VC4_REG32(reg) { .name = #reg, .offset = reg }

struct vc4_exec_info {
+ struct vc4_dev *dev;
+
/* Sequence number for this bin/render job. */
uint64_t seqno;

@@ -733,6 +737,8 @@ struct vc4_exec_info {
* released when the DRM file is closed should be placed here.
*/
struct vc4_file {
+ struct vc4_dev *dev;
+
struct {
struct idr idr;
struct mutex lock;
--- a/drivers/gpu/drm/vc4/vc4_gem.c
+++ b/drivers/gpu/drm/vc4/vc4_gem.c
@@ -76,6 +76,9 @@ vc4_get_hang_state_ioctl(struct drm_devi
u32 i;
int ret = 0;

+ if (WARN_ON_ONCE(vc4->is_vc5))
+ return -ENODEV;
+
if (!vc4->v3d) {
DRM_DEBUG("VC4_GET_HANG_STATE with no VC4 V3D probed\n");
return -ENODEV;
@@ -386,6 +389,9 @@ vc4_wait_for_seqno(struct drm_device *de
unsigned long timeout_expire;
DEFINE_WAIT(wait);

+ if (WARN_ON_ONCE(vc4->is_vc5))
+ return -ENODEV;
+
if (vc4->finished_seqno >= seqno)
return 0;

@@ -468,6 +474,9 @@ vc4_submit_next_bin_job(struct drm_devic
struct vc4_dev *vc4 = to_vc4_dev(dev);
struct vc4_exec_info *exec;

+ if (WARN_ON_ONCE(vc4->is_vc5))
+ return;
+
again:
exec = vc4_first_bin_job(vc4);
if (!exec)
@@ -511,6 +520,9 @@ vc4_submit_next_render_job(struct drm_de
if (!exec)
return;

+ if (WARN_ON_ONCE(vc4->is_vc5))
+ return;
+
/* A previous RCL may have written to one of our textures, and
* our full cache flush at bin time may have occurred before
* that RCL completed. Flush the texture cache now, but not
@@ -528,6 +540,9 @@ vc4_move_job_to_render(struct drm_device
struct vc4_dev *vc4 = to_vc4_dev(dev);
bool was_empty = list_empty(&vc4->render_job_list);

+ if (WARN_ON_ONCE(vc4->is_vc5))
+ return;
+
list_move_tail(&exec->head, &vc4->render_job_list);
if (was_empty)
vc4_submit_next_render_job(dev);
@@ -992,6 +1007,9 @@ vc4_job_handle_completed(struct vc4_dev
unsigned long irqflags;
struct vc4_seqno_cb *cb, *cb_temp;

+ if (WARN_ON_ONCE(vc4->is_vc5))
+ return;
+
spin_lock_irqsave(&vc4->job_lock, irqflags);
while (!list_empty(&vc4->job_done_list)) {
struct vc4_exec_info *exec =
@@ -1028,6 +1046,9 @@ int vc4_queue_seqno_cb(struct drm_device
struct vc4_dev *vc4 = to_vc4_dev(dev);
unsigned long irqflags;

+ if (WARN_ON_ONCE(vc4->is_vc5))
+ return -ENODEV;
+
cb->func = func;
INIT_WORK(&cb->work, vc4_seqno_cb_work);

@@ -1078,8 +1099,12 @@ int
vc4_wait_seqno_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
+ struct vc4_dev *vc4 = to_vc4_dev(dev);
struct drm_vc4_wait_seqno *args = data;

+ if (WARN_ON_ONCE(vc4->is_vc5))
+ return -ENODEV;
+
return vc4_wait_for_seqno_ioctl_helper(dev, args->seqno,
&args->timeout_ns);
}
@@ -1088,11 +1113,15 @@ int
vc4_wait_bo_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
+ struct vc4_dev *vc4 = to_vc4_dev(dev);
int ret;
struct drm_vc4_wait_bo *args = data;
struct drm_gem_object *gem_obj;
struct vc4_bo *bo;

+ if (WARN_ON_ONCE(vc4->is_vc5))
+ return -ENODEV;
+
if (args->pad != 0)
return -EINVAL;

@@ -1135,6 +1164,9 @@ vc4_submit_cl_ioctl(struct drm_device *d
struct dma_fence *in_fence;
int ret = 0;

+ if (WARN_ON_ONCE(vc4->is_vc5))
+ return -ENODEV;
+
if (!vc4->v3d) {
DRM_DEBUG("VC4_SUBMIT_CL with no VC4 V3D probed\n");
return -ENODEV;
@@ -1158,6 +1190,7 @@ vc4_submit_cl_ioctl(struct drm_device *d
DRM_ERROR("malloc failure on exec struct\n");
return -ENOMEM;
}
+ exec->dev = vc4;

ret = vc4_v3d_pm_get(vc4);
if (ret) {
@@ -1267,6 +1300,9 @@ int vc4_gem_init(struct drm_device *dev)
{
struct vc4_dev *vc4 = to_vc4_dev(dev);

+ if (WARN_ON_ONCE(vc4->is_vc5))
+ return -ENODEV;
+
vc4->dma_fence_context = dma_fence_context_alloc(1);

INIT_LIST_HEAD(&vc4->bin_job_list);
@@ -1312,11 +1348,15 @@ static void vc4_gem_destroy(struct drm_d
int vc4_gem_madvise_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
+ struct vc4_dev *vc4 = to_vc4_dev(dev);
struct drm_vc4_gem_madvise *args = data;
struct drm_gem_object *gem_obj;
struct vc4_bo *bo;
int ret;

+ if (WARN_ON_ONCE(vc4->is_vc5))
+ return -ENODEV;
+
switch (args->madv) {
case VC4_MADV_DONTNEED:
case VC4_MADV_WILLNEED:
--- a/drivers/gpu/drm/vc4/vc4_irq.c
+++ b/drivers/gpu/drm/vc4/vc4_irq.c
@@ -260,6 +260,9 @@ vc4_irq_enable(struct drm_device *dev)
{
struct vc4_dev *vc4 = to_vc4_dev(dev);

+ if (WARN_ON_ONCE(vc4->is_vc5))
+ return;
+
if (!vc4->v3d)
return;

@@ -274,6 +277,9 @@ vc4_irq_disable(struct drm_device *dev)
{
struct vc4_dev *vc4 = to_vc4_dev(dev);

+ if (WARN_ON_ONCE(vc4->is_vc5))
+ return;
+
if (!vc4->v3d)
return;

@@ -291,8 +297,12 @@ vc4_irq_disable(struct drm_device *dev)

int vc4_irq_install(struct drm_device *dev, int irq)
{
+ struct vc4_dev *vc4 = to_vc4_dev(dev);
int ret;

+ if (WARN_ON_ONCE(vc4->is_vc5))
+ return -ENODEV;
+
if (irq == IRQ_NOTCONNECTED)
return -ENOTCONN;

@@ -311,6 +321,9 @@ void vc4_irq_uninstall(struct drm_device
{
struct vc4_dev *vc4 = to_vc4_dev(dev);

+ if (WARN_ON_ONCE(vc4->is_vc5))
+ return;
+
vc4_irq_disable(dev);
free_irq(vc4->irq, dev);
}
@@ -321,6 +334,9 @@ void vc4_irq_reset(struct drm_device *de
struct vc4_dev *vc4 = to_vc4_dev(dev);
unsigned long irqflags;

+ if (WARN_ON_ONCE(vc4->is_vc5))
+ return;
+
/* Acknowledge any stale IRQs. */
V3D_WRITE(V3D_INTCTL, V3D_DRIVER_IRQS);

--- a/drivers/gpu/drm/vc4/vc4_kms.c
+++ b/drivers/gpu/drm/vc4/vc4_kms.c
@@ -485,8 +485,12 @@ static struct drm_framebuffer *vc4_fb_cr
struct drm_file *file_priv,
const struct drm_mode_fb_cmd2 *mode_cmd)
{
+ struct vc4_dev *vc4 = to_vc4_dev(dev);
struct drm_mode_fb_cmd2 mode_cmd_local;

+ if (WARN_ON_ONCE(vc4->is_vc5))
+ return ERR_PTR(-ENODEV);
+
/* If the user didn't specify a modifier, use the
* vc4_set_tiling_ioctl() state for the BO.
*/
--- a/drivers/gpu/drm/vc4/vc4_perfmon.c
+++ b/drivers/gpu/drm/vc4/vc4_perfmon.c
@@ -17,13 +17,27 @@

void vc4_perfmon_get(struct vc4_perfmon *perfmon)
{
+ struct vc4_dev *vc4 = perfmon->dev;
+
+ if (WARN_ON_ONCE(vc4->is_vc5))
+ return;
+
if (perfmon)
refcount_inc(&perfmon->refcnt);
}

void vc4_perfmon_put(struct vc4_perfmon *perfmon)
{
- if (perfmon && refcount_dec_and_test(&perfmon->refcnt))
+ struct vc4_dev *vc4;
+
+ if (!perfmon)
+ return;
+
+ vc4 = perfmon->dev;
+ if (WARN_ON_ONCE(vc4->is_vc5))
+ return;
+
+ if (refcount_dec_and_test(&perfmon->refcnt))
kfree(perfmon);
}

@@ -32,6 +46,9 @@ void vc4_perfmon_start(struct vc4_dev *v
unsigned int i;
u32 mask;

+ if (WARN_ON_ONCE(vc4->is_vc5))
+ return;
+
if (WARN_ON_ONCE(!perfmon || vc4->active_perfmon))
return;

@@ -49,6 +66,9 @@ void vc4_perfmon_stop(struct vc4_dev *vc
{
unsigned int i;

+ if (WARN_ON_ONCE(vc4->is_vc5))
+ return;
+
if (WARN_ON_ONCE(!vc4->active_perfmon ||
perfmon != vc4->active_perfmon))
return;
@@ -64,8 +84,12 @@ void vc4_perfmon_stop(struct vc4_dev *vc

struct vc4_perfmon *vc4_perfmon_find(struct vc4_file *vc4file, int id)
{
+ struct vc4_dev *vc4 = vc4file->dev;
struct vc4_perfmon *perfmon;

+ if (WARN_ON_ONCE(vc4->is_vc5))
+ return NULL;
+
mutex_lock(&vc4file->perfmon.lock);
perfmon = idr_find(&vc4file->perfmon.idr, id);
vc4_perfmon_get(perfmon);
@@ -76,8 +100,14 @@ struct vc4_perfmon *vc4_perfmon_find(str

void vc4_perfmon_open_file(struct vc4_file *vc4file)
{
+ struct vc4_dev *vc4 = vc4file->dev;
+
+ if (WARN_ON_ONCE(vc4->is_vc5))
+ return;
+
mutex_init(&vc4file->perfmon.lock);
idr_init_base(&vc4file->perfmon.idr, VC4_PERFMONID_MIN);
+ vc4file->dev = vc4;
}

static int vc4_perfmon_idr_del(int id, void *elem, void *data)
@@ -91,6 +121,11 @@ static int vc4_perfmon_idr_del(int id, v

void vc4_perfmon_close_file(struct vc4_file *vc4file)
{
+ struct vc4_dev *vc4 = vc4file->dev;
+
+ if (WARN_ON_ONCE(vc4->is_vc5))
+ return;
+
mutex_lock(&vc4file->perfmon.lock);
idr_for_each(&vc4file->perfmon.idr, vc4_perfmon_idr_del, NULL);
idr_destroy(&vc4file->perfmon.idr);
@@ -107,6 +142,9 @@ int vc4_perfmon_create_ioctl(struct drm_
unsigned int i;
int ret;

+ if (WARN_ON_ONCE(vc4->is_vc5))
+ return -ENODEV;
+
if (!vc4->v3d) {
DRM_DEBUG("Creating perfmon no VC4 V3D probed\n");
return -ENODEV;
@@ -127,6 +165,7 @@ int vc4_perfmon_create_ioctl(struct drm_
GFP_KERNEL);
if (!perfmon)
return -ENOMEM;
+ perfmon->dev = vc4;

for (i = 0; i < req->ncounters; i++)
perfmon->events[i] = req->events[i];
@@ -157,6 +196,9 @@ int vc4_perfmon_destroy_ioctl(struct drm
struct drm_vc4_perfmon_destroy *req = data;
struct vc4_perfmon *perfmon;

+ if (WARN_ON_ONCE(vc4->is_vc5))
+ return -ENODEV;
+
if (!vc4->v3d) {
DRM_DEBUG("Destroying perfmon no VC4 V3D probed\n");
return -ENODEV;
@@ -182,6 +224,9 @@ int vc4_perfmon_get_values_ioctl(struct
struct vc4_perfmon *perfmon;
int ret;

+ if (WARN_ON_ONCE(vc4->is_vc5))
+ return -ENODEV;
+
if (!vc4->v3d) {
DRM_DEBUG("Getting perfmon no VC4 V3D probed\n");
return -ENODEV;
--- a/drivers/gpu/drm/vc4/vc4_render_cl.c
+++ b/drivers/gpu/drm/vc4/vc4_render_cl.c
@@ -593,11 +593,15 @@ vc4_rcl_render_config_surface_setup(stru

int vc4_get_rcl(struct drm_device *dev, struct vc4_exec_info *exec)
{
+ struct vc4_dev *vc4 = to_vc4_dev(dev);
struct vc4_rcl_setup setup = {0};
struct drm_vc4_submit_cl *args = exec->args;
bool has_bin = args->bin_cl_size != 0;
int ret;

+ if (WARN_ON_ONCE(vc4->is_vc5))
+ return -ENODEV;
+
if (args->min_x_tile > args->max_x_tile ||
args->min_y_tile > args->max_y_tile) {
DRM_DEBUG("Bad render tile set (%d,%d)-(%d,%d)\n",
--- a/drivers/gpu/drm/vc4/vc4_v3d.c
+++ b/drivers/gpu/drm/vc4/vc4_v3d.c
@@ -127,6 +127,9 @@ static int vc4_v3d_debugfs_ident(struct
int
vc4_v3d_pm_get(struct vc4_dev *vc4)
{
+ if (WARN_ON_ONCE(vc4->is_vc5))
+ return -ENODEV;
+
mutex_lock(&vc4->power_lock);
if (vc4->power_refcount++ == 0) {
int ret = pm_runtime_get_sync(&vc4->v3d->pdev->dev);
@@ -145,6 +148,9 @@ vc4_v3d_pm_get(struct vc4_dev *vc4)
void
vc4_v3d_pm_put(struct vc4_dev *vc4)
{
+ if (WARN_ON_ONCE(vc4->is_vc5))
+ return;
+
mutex_lock(&vc4->power_lock);
if (--vc4->power_refcount == 0) {
pm_runtime_mark_last_busy(&vc4->v3d->pdev->dev);
@@ -172,6 +178,9 @@ int vc4_v3d_get_bin_slot(struct vc4_dev
uint64_t seqno = 0;
struct vc4_exec_info *exec;

+ if (WARN_ON_ONCE(vc4->is_vc5))
+ return -ENODEV;
+
try_again:
spin_lock_irqsave(&vc4->job_lock, irqflags);
slot = ffs(~vc4->bin_alloc_used);
@@ -316,6 +325,9 @@ int vc4_v3d_bin_bo_get(struct vc4_dev *v
{
int ret = 0;

+ if (WARN_ON_ONCE(vc4->is_vc5))
+ return -ENODEV;
+
mutex_lock(&vc4->bin_bo_lock);

if (used && *used)
@@ -348,6 +360,9 @@ static void bin_bo_release(struct kref *

void vc4_v3d_bin_bo_put(struct vc4_dev *vc4)
{
+ if (WARN_ON_ONCE(vc4->is_vc5))
+ return;
+
mutex_lock(&vc4->bin_bo_lock);
kref_put(&vc4->bin_bo_kref, bin_bo_release);
mutex_unlock(&vc4->bin_bo_lock);
--- a/drivers/gpu/drm/vc4/vc4_validate.c
+++ b/drivers/gpu/drm/vc4/vc4_validate.c
@@ -105,9 +105,13 @@ size_is_lt(uint32_t width, uint32_t heig
struct drm_gem_cma_object *
vc4_use_bo(struct vc4_exec_info *exec, uint32_t hindex)
{
+ struct vc4_dev *vc4 = exec->dev;
struct drm_gem_cma_object *obj;
struct vc4_bo *bo;

+ if (WARN_ON_ONCE(vc4->is_vc5))
+ return NULL;
+
if (hindex >= exec->bo_count) {
DRM_DEBUG("BO index %d greater than BO count %d\n",
hindex, exec->bo_count);
@@ -160,10 +164,14 @@ vc4_check_tex_size(struct vc4_exec_info
uint32_t offset, uint8_t tiling_format,
uint32_t width, uint32_t height, uint8_t cpp)
{
+ struct vc4_dev *vc4 = exec->dev;
uint32_t aligned_width, aligned_height, stride, size;
uint32_t utile_w = utile_width(cpp);
uint32_t utile_h = utile_height(cpp);

+ if (WARN_ON_ONCE(vc4->is_vc5))
+ return -ENODEV;
+
/* The shaded vertex format stores signed 12.4 fixed point
* (-2048,2047) offsets from the viewport center, so we should
* never have a render target larger than 4096. The texture
@@ -482,10 +490,14 @@ vc4_validate_bin_cl(struct drm_device *d
void *unvalidated,
struct vc4_exec_info *exec)
{
+ struct vc4_dev *vc4 = to_vc4_dev(dev);
uint32_t len = exec->args->bin_cl_size;
uint32_t dst_offset = 0;
uint32_t src_offset = 0;

+ if (WARN_ON_ONCE(vc4->is_vc5))
+ return -ENODEV;
+
while (src_offset < len) {
void *dst_pkt = validated + dst_offset;
void *src_pkt = unvalidated + src_offset;
@@ -926,9 +938,13 @@ int
vc4_validate_shader_recs(struct drm_device *dev,
struct vc4_exec_info *exec)
{
+ struct vc4_dev *vc4 = to_vc4_dev(dev);
uint32_t i;
int ret = 0;

+ if (WARN_ON_ONCE(vc4->is_vc5))
+ return -ENODEV;
+
for (i = 0; i < exec->shader_state_count; i++) {
ret = validate_gl_shader_rec(dev, exec, &exec->shader_state[i]);
if (ret)
--- a/drivers/gpu/drm/vc4/vc4_validate_shaders.c
+++ b/drivers/gpu/drm/vc4/vc4_validate_shaders.c
@@ -778,6 +778,7 @@ vc4_handle_branch_target(struct vc4_shad
struct vc4_validated_shader_info *
vc4_validate_shader(struct drm_gem_cma_object *shader_obj)
{
+ struct vc4_dev *vc4 = to_vc4_dev(shader_obj->base.dev);
bool found_shader_end = false;
int shader_end_ip = 0;
uint32_t last_thread_switch_ip = -3;
@@ -785,6 +786,9 @@ vc4_validate_shader(struct drm_gem_cma_o
struct vc4_validated_shader_info *validated_shader = NULL;
struct vc4_shader_validation_state validation_state;

+ if (WARN_ON_ONCE(vc4->is_vc5))
+ return NULL;
+
memset(&validation_state, 0, sizeof(validation_state));
validation_state.shader = shader_obj->vaddr;
validation_state.max_ip = shader_obj->base.size / sizeof(uint64_t);