brcm2708: update to latest patches from the RPi foundation
[openwrt/staging/lynxis.git] target/linux/brcm2708/patches-4.14/950-0043-vcsm-VideoCore-shared-memory-service-for-BCM2835.patch
1 From af66401c883e2b3d23d20687ebd05cbc8878404e Mon Sep 17 00:00:00 2001
2 From: Tim Gover <tgover@broadcom.com>
3 Date: Tue, 22 Jul 2014 15:41:04 +0100
4 Subject: [PATCH 043/454] vcsm: VideoCore shared memory service for BCM2835
5 MIME-Version: 1.0
6 Content-Type: text/plain; charset=UTF-8
7 Content-Transfer-Encoding: 8bit
8
9 Add experimental support for the VideoCore shared memory service.
10 This allows user processes to allocate memory from VideoCore's
11 GPU relocatable heap and mmap the buffers. Additionally, the memory
12 handles can be passed to other VideoCore services such as MMAL, OpenMAX
13 and DispmanX.
14
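As a rough illustration, a minimal user-space sketch of the intended usage
(the ioctl name and struct layout are assumptions based on the new
include/linux/broadcom/vmcs_sm_ioctl.h header this patch adds, which is not
shown in this excerpt; the handle-as-mmap-offset convention is likewise
assumed):

    /* Sketch only: allocate one page from the GPU relocatable heap
     * through /dev/vcsm and map it into the process. */
    #include <fcntl.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <sys/mman.h>
    #include <linux/broadcom/vmcs_sm_ioctl.h>

    void *vcsm_alloc_page(int fd)   /* fd = open("/dev/vcsm", O_RDWR) */
    {
        struct vmcs_sm_ioctl_alloc alloc = { 0 };

        alloc.size = 4096;             /* bytes per unit */
        alloc.num = 1;                 /* one unit */
        strcpy(alloc.name, "example"); /* eases tracking on the VC side */
        if (ioctl(fd, VMCS_SM_IOCTL_MEM_ALLOC, &alloc) < 0)
            return NULL;

        /* The returned user handle is assumed to double as the mmap
         * offset for this device. */
        return mmap(NULL, alloc.size, PROT_READ | PROT_WRITE,
                    MAP_SHARED, fd, alloc.handle);
    }
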
15 TODO
16 * This driver was originally released for BCM28155 which has a different
17 cache architecture to BCM2835. Consequently, in this release only
18 uncached mappings are supported. However, there's no fundamental
19 reason why cached mappings cannot be supported on BCM2835.
20 * More refactoring is required to remove the typedefs.
21 * Re-enable some of the commented-out debug-fs statistics which were
22 disabled when migrating code from proc-fs.
23 * There's a lot of code to support sharing of VCSM in order to support
24 Android. This could probably be done more cleanly or perhaps just
25 removed.
26
27 Signed-off-by: Tim Gover <timgover@gmail.com>
28
29 config: Disable VC_SM for now to fix hang with cutdown kernel
30
31 vcsm: Use boolean as it cannot be built as a module
32
33 On building the bcm_vc_sm as a module we get the following error:
34
35 v7_dma_flush_range and do_munmap are undefined in vc-sm.ko.
36
37 Fix by removing the option to build it as a module.
38
39 vcsm: Add ioctl for custom cache flushing
40
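For illustration, a hedged user-space sketch of driving this ioctl (the
ioctl name and struct layout are assumptions based on the new
vmcs_sm_ioctl.h, not shown in this excerpt; the cmd values mirror the
VCSM_CACHE_OP_* macros added to vc_sm_knl.h below):

    /* Sketch: flush (clean + invalidate) one page of a cached buffer. */
    struct vmcs_sm_ioctl_clean_invalid cache = { 0 };  /* assumed layout */

    cache.s[0].cmd    = 3;           /* 3 == flush, cf. VCSM_CACHE_OP_FLUSH */
    cache.s[0].handle = usr_handle;  /* handle returned by the alloc ioctl */
    cache.s[0].addr   = (unsigned int)(uintptr_t)ptr;  /* mapped address */
    cache.s[0].size   = 4096;
    if (ioctl(fd, VMCS_SM_IOCTL_MEM_CLEAN_INVALID, &cache) < 0)
        perror("vcsm cache op");
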
41 vc-sm: Move headers out of arch directory
42
43 Signed-off-by: Noralf Trønnes <noralf@tronnes.org>
44
45 vcsm: Treat EBUSY as success rather than SIGBUS
46
47 Currently, if two cores fault on the same page concurrently, one will return
48 VM_FAULT_NOPAGE and the other VM_FAULT_SIGBUS, crashing the user code.
49
50 Also report when mapping fails.
51
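A minimal sketch of the idea (illustrative, not the exact code of this
patch; it assumes the mmap() handler stored the struct sm_mmap pointer in
vm_private_data):

    static int vcsm_vma_fault(struct vm_fault *vmf)
    {
        struct sm_mmap *map = vmf->vma->vm_private_data;
        unsigned long page_off =
            (vmf->address - vmf->vma->vm_start) >> PAGE_SHIFT;
        unsigned long pfn =
            vcaddr_to_pfn((unsigned long)map->resource->res_base_mem)
            + page_off;
        int ret = vm_insert_pfn(vmf->vma, vmf->address, pfn);

        /* -EBUSY means the other core already inserted this PTE, so
         * the faulting access can simply be retried: treat as success. */
        if (ret == 0 || ret == -EBUSY)
            return VM_FAULT_NOPAGE;

        /* Report when mapping fails instead of silently killing. */
        pr_err("%s: vm_insert_pfn failed (%d)\n", __func__, ret);
        return VM_FAULT_SIGBUS;
    }
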
52 Signed-off-by: popcornmix <popcornmix@gmail.com>
53
54 vcsm: Provide new ioctl to clean/invalidate a 2D block
55
56 vcsm: Convert to loading via device tree.
57
58 Signed-off-by: Dave Stevenson <dave.stevenson@raspberrypi.org>
59
60 VCSM: New option to import a DMABUF for VPU use
61
62 Takes a dmabuf, and then calls over to the VPU to wrap
63 it into a suitable handle.
64
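In outline, the import path looks roughly like this (a sketch with error
handling elided; the function name and attaching device are placeholders,
while struct vc_sm_import and the message types come from vc_sm_defs.h
added below):

    static int vcsm_import_sketch(struct device *dev, struct dma_buf *dmabuf)
    {
        struct dma_buf_attachment *attach = dma_buf_attach(dmabuf, dev);
        struct sg_table *sgt = dma_buf_map_attachment(attach,
                                                      DMA_BIDIRECTIONAL);
        struct vc_sm_import msg = {
            .type = VC_SM_ALLOC_NON_CACHED,
            /* bus address the VPU will access */
            .addr = (uint32_t)sg_dma_address(sgt->sgl),
            .size = (uint32_t)dmabuf->size,
        };

        /* vc_vchi_sm_import() (vc_vchi_sm.c below) sends this as a
         * VC_SM_MSG_TYPE_IMPORT message; the VPU wraps the address in
         * a MEM_HANDLE_T returned in struct vc_sm_import_result. A
         * later VC_SM_MSG_TYPE_RELEASED message signals that the VPU
         * is done, so the dmabuf references can be dropped. */
        return 0;
    }
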
65 Signed-off-by: Dave Stevenson <dave.stevenson@raspberrypi.org>
66
67 vcsm: fix multi-platform build
68
69 vcsm: add macros for cache functions
70
71 vcsm: use dma APIs for cache functions
72
73 * Will handle multi-platform builds
74
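A minimal sketch of what that change looks like, assuming a helper of this
shape (the function name and device pointer are placeholders; the
VCSM_CACHE_OP_* values are the macros added to vc_sm_knl.h below):

    /* Sketch: route cache maintenance through the portable DMA API
     * instead of calling v7_dma_flush_range() directly. */
    static void vcsm_cache_op(struct device *dev, dma_addr_t addr,
                              size_t size, unsigned int op)
    {
        switch (op) {
        case VCSM_CACHE_OP_INV:   /* drop stale lines before reading */
            dma_sync_single_for_cpu(dev, addr, size, DMA_FROM_DEVICE);
            break;
        case VCSM_CACHE_OP_CLEAN: /* write back dirty lines */
            dma_sync_single_for_device(dev, addr, size, DMA_TO_DEVICE);
            break;
        case VCSM_CACHE_OP_FLUSH: /* clean, then invalidate */
            dma_sync_single_for_device(dev, addr, size, DMA_TO_DEVICE);
            dma_sync_single_for_cpu(dev, addr, size, DMA_FROM_DEVICE);
            break;
        }
    }
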
75 vcsm: Fix up macros to avoid breaking numbers used by existing apps
76 ---
77 drivers/char/Kconfig | 2 +
78 drivers/char/Makefile | 1 +
79 drivers/char/broadcom/Kconfig | 10 +
80 drivers/char/broadcom/Makefile | 1 +
81 drivers/char/broadcom/vc_sm/Makefile | 9 +
82 drivers/char/broadcom/vc_sm/vc_sm_defs.h | 237 ++
83 drivers/char/broadcom/vc_sm/vc_sm_knl.h | 58 +
84 drivers/char/broadcom/vc_sm/vc_vchi_sm.c | 516 ++++
85 drivers/char/broadcom/vc_sm/vc_vchi_sm.h | 102 +
86 drivers/char/broadcom/vc_sm/vmcs_sm.c | 3493 ++++++++++++++++++++++
87 include/linux/broadcom/vmcs_sm_ioctl.h | 280 ++
88 11 files changed, 4709 insertions(+)
89 create mode 100644 drivers/char/broadcom/vc_sm/Makefile
90 create mode 100644 drivers/char/broadcom/vc_sm/vc_sm_defs.h
91 create mode 100644 drivers/char/broadcom/vc_sm/vc_sm_knl.h
92 create mode 100644 drivers/char/broadcom/vc_sm/vc_vchi_sm.c
93 create mode 100644 drivers/char/broadcom/vc_sm/vc_vchi_sm.h
94 create mode 100644 drivers/char/broadcom/vc_sm/vmcs_sm.c
95 create mode 100644 include/linux/broadcom/vmcs_sm_ioctl.h
96
97 --- a/drivers/char/Kconfig
98 +++ b/drivers/char/Kconfig
99 @@ -5,6 +5,8 @@
100
101 menu "Character devices"
102
103 +source "drivers/char/broadcom/Kconfig"
104 +
105 source "drivers/tty/Kconfig"
106
107 config DEVMEM
108 --- a/drivers/char/Makefile
109 +++ b/drivers/char/Makefile
110 @@ -60,3 +60,4 @@ js-rtc-y = rtc.o
111 obj-$(CONFIG_TILE_SROM) += tile-srom.o
112 obj-$(CONFIG_XILLYBUS) += xillybus/
113 obj-$(CONFIG_POWERNV_OP_PANEL) += powernv-op-panel.o
114 +obj-$(CONFIG_BRCM_CHAR_DRIVERS) += broadcom/
115 --- a/drivers/char/broadcom/Kconfig
116 +++ b/drivers/char/broadcom/Kconfig
117 @@ -16,3 +16,13 @@ config BCM2708_VCMEM
118 Helper for videocore memory access and total size allocation.
119
120 endif
121 +
122 +config BCM_VC_SM
123 + bool "VMCS Shared Memory"
124 + depends on BCM2835_VCHIQ
125 + select BCM2708_VCMEM
126 + select DMA_SHARED_BUFFER
127 + default n
128 + help
129 + Support for the VC shared memory on the Broadcom reference
130 + design. Uses the VCHIQ stack.
131 --- a/drivers/char/broadcom/Makefile
132 +++ b/drivers/char/broadcom/Makefile
133 @@ -1 +1,2 @@
134 obj-$(CONFIG_BCM2708_VCMEM) += vc_mem.o
135 +obj-$(CONFIG_BCM_VC_SM) += vc_sm/
136 --- /dev/null
137 +++ b/drivers/char/broadcom/vc_sm/Makefile
138 @@ -0,0 +1,9 @@
139 +ccflags-$(CONFIG_BCM_VC_SM) += -Werror -Wall -Wstrict-prototypes -Wno-trigraphs -O2
140 +ccflags-$(CONFIG_BCM_VC_SM) += -I"drivers/staging/vc04_services" -I"drivers/staging/vc04_services/interface/vchi" -I"drivers/staging/vc04_services/interface/vchiq_arm" -I"$(srctree)/fs/"
141 +ccflags-$(CONFIG_BCM_VC_SM) += -DOS_ASSERT_FAILURE -D__STDC_VERSION=199901L -D__STDC_VERSION__=199901L -D__VCCOREVER__=0 -D__KERNEL__ -D__linux__
142 +
143 +obj-$(CONFIG_BCM_VC_SM) := vc-sm.o
144 +
145 +vc-sm-objs := \
146 + vmcs_sm.o \
147 + vc_vchi_sm.o
148 --- /dev/null
149 +++ b/drivers/char/broadcom/vc_sm/vc_sm_defs.h
150 @@ -0,0 +1,237 @@
151 +/*
152 + ****************************************************************************
153 + * Copyright 2011 Broadcom Corporation. All rights reserved.
154 + *
155 + * Unless you and Broadcom execute a separate written software license
156 + * agreement governing use of this software, this software is licensed to you
157 + * under the terms of the GNU General Public License version 2, available at
158 + * http://www.broadcom.com/licenses/GPLv2.php (the "GPL").
159 + *
160 + * Notwithstanding the above, under no circumstances may you combine this
161 + * software in any way with any other Broadcom software provided under a
162 + * license other than the GPL, without Broadcom's express prior written
163 + * consent.
164 + ****************************************************************************
165 + */
166 +
167 +#ifndef __VC_SM_DEFS_H__INCLUDED__
168 +#define __VC_SM_DEFS_H__INCLUDED__
169 +
170 +/* FourCC code used for VCHI connection */
171 +#define VC_SM_SERVER_NAME MAKE_FOURCC("SMEM")
172 +
173 +/* Maximum message length */
174 +#define VC_SM_MAX_MSG_LEN (sizeof(union vc_sm_msg_union_t) + \
175 + sizeof(struct vc_sm_msg_hdr_t))
176 +#define VC_SM_MAX_RSP_LEN (sizeof(union vc_sm_msg_union_t))
177 +
178 +/* Resource name maximum size */
179 +#define VC_SM_RESOURCE_NAME 32
180 +
181 +enum vc_sm_msg_type {
182 + /* Message types supported for HOST->VC direction */
183 +
184 + /* Allocate shared memory block */
185 + VC_SM_MSG_TYPE_ALLOC,
186 + /* Lock allocated shared memory block */
187 + VC_SM_MSG_TYPE_LOCK,
188 + /* Unlock allocated shared memory block */
189 + VC_SM_MSG_TYPE_UNLOCK,
190 + /* Unlock allocated shared memory block, do not answer command */
191 + VC_SM_MSG_TYPE_UNLOCK_NOANS,
192 + /* Free shared memory block */
193 + VC_SM_MSG_TYPE_FREE,
194 + /* Resize a shared memory block */
195 + VC_SM_MSG_TYPE_RESIZE,
196 + /* Walk the allocated shared memory block(s) */
197 + VC_SM_MSG_TYPE_WALK_ALLOC,
198 +
199 + /* A previously applied action will need to be reverted */
200 + VC_SM_MSG_TYPE_ACTION_CLEAN,
201 +
202 + /*
203 + * Import a physical address and wrap into a MEM_HANDLE_T.
204 + * Release with VC_SM_MSG_TYPE_FREE.
205 + */
206 + VC_SM_MSG_TYPE_IMPORT,
207 +
208 + /* Message types supported for VC->HOST direction */
209 +
210 + /*
211 + * VC has finished with an imported memory allocation.
212 + * Release any Linux reference counts on the underlying block.
213 + */
214 + VC_SM_MSG_TYPE_RELEASED,
215 +
216 + VC_SM_MSG_TYPE_MAX
217 +};
218 +
219 +/* Type of memory to be allocated */
220 +enum vc_sm_alloc_type_t {
221 + VC_SM_ALLOC_CACHED,
222 + VC_SM_ALLOC_NON_CACHED,
223 +};
224 +
225 +/* Message header for all messages in HOST->VC direction */
226 +struct vc_sm_msg_hdr_t {
227 + int32_t type;
228 + uint32_t trans_id;
229 + uint8_t body[0];
230 +
231 +};
232 +
233 +/* Request to allocate memory (HOST->VC) */
234 +struct vc_sm_alloc_t {
235 + /* type of memory to allocate */
236 + enum vc_sm_alloc_type_t type;
237 + /* byte amount of data to allocate per unit */
238 + uint32_t base_unit;
239 +	/* number of units to allocate */
240 +	uint32_t num_unit;
241 +	/* alignment to be applied on allocation */
242 + uint32_t alignement;
243 + /* identity of who allocated this block */
244 + uint32_t allocator;
245 + /* resource name (for easier tracking on vc side) */
246 + char name[VC_SM_RESOURCE_NAME];
247 +
248 +};
249 +
250 +/* Result of a requested memory allocation (VC->HOST) */
251 +struct vc_sm_alloc_result_t {
252 + /* Transaction identifier */
253 + uint32_t trans_id;
254 +
255 + /* Resource handle */
256 + uint32_t res_handle;
257 + /* Pointer to resource buffer */
258 + uint32_t res_mem;
259 + /* Resource base size (bytes) */
260 + uint32_t res_base_size;
261 + /* Resource number */
262 + uint32_t res_num;
263 +
264 +};
265 +
266 +/* Request to free a previously allocated memory (HOST->VC) */
267 +struct vc_sm_free_t {
268 + /* Resource handle (returned from alloc) */
269 + uint32_t res_handle;
270 + /* Resource buffer (returned from alloc) */
271 + uint32_t res_mem;
272 +
273 +};
274 +
275 +/* Request to lock a previously allocated memory (HOST->VC) */
276 +struct vc_sm_lock_unlock_t {
277 + /* Resource handle (returned from alloc) */
278 + uint32_t res_handle;
279 + /* Resource buffer (returned from alloc) */
280 + uint32_t res_mem;
281 +
282 +};
283 +
284 +/* Request to resize a previously allocated memory (HOST->VC) */
285 +struct vc_sm_resize_t {
286 + /* Resource handle (returned from alloc) */
287 + uint32_t res_handle;
288 + /* Resource buffer (returned from alloc) */
289 + uint32_t res_mem;
290 + /* Resource *new* size requested (bytes) */
291 + uint32_t res_new_size;
292 +
293 +};
294 +
295 +/* Result of a requested memory lock (VC->HOST) */
296 +struct vc_sm_lock_result_t {
297 + /* Transaction identifier */
298 + uint32_t trans_id;
299 +
300 + /* Resource handle */
301 + uint32_t res_handle;
302 + /* Pointer to resource buffer */
303 + uint32_t res_mem;
304 + /*
305 + * Pointer to former resource buffer if the memory
306 + * was reallocated
307 + */
308 + uint32_t res_old_mem;
309 +
310 +};
311 +
312 +/* Generic result for a request (VC->HOST) */
313 +struct vc_sm_result_t {
314 + /* Transaction identifier */
315 + uint32_t trans_id;
316 +
317 + int32_t success;
318 +
319 +};
320 +
321 +/* Request to revert a previously applied action (HOST->VC) */
322 +struct vc_sm_action_clean_t {
323 + /* Action of interest */
324 + enum vc_sm_msg_type res_action;
325 + /* Transaction identifier for the action of interest */
326 + uint32_t action_trans_id;
327 +
328 +};
329 +
330 +/* Request to remove all data associated with a given allocator (HOST->VC) */
331 +struct vc_sm_free_all_t {
332 + /* Allocator identifier */
333 + uint32_t allocator;
334 +};
335 +
336 +/* Request to import memory (HOST->VC) */
337 +struct vc_sm_import {
338 + /* type of memory to allocate */
339 + enum vc_sm_alloc_type_t type;
340 + /* pointer to the VC (ie physical) address of the allocated memory */
341 + uint32_t addr;
342 + /* size of buffer */
343 + uint32_t size;
344 + /* opaque handle returned in RELEASED messages */
345 + int32_t kernel_id;
346 + /* Allocator identifier */
347 + uint32_t allocator;
348 + /* resource name (for easier tracking on vc side) */
349 + char name[VC_SM_RESOURCE_NAME];
350 +};
351 +
352 +/* Result of a requested memory import (VC->HOST) */
353 +struct vc_sm_import_result {
354 + /* Transaction identifier */
355 + uint32_t trans_id;
356 +
357 + /* Resource handle */
358 + uint32_t res_handle;
359 +};
360 +
361 +/* Notification that VC has finished with an allocation (VC->HOST) */
362 +struct vc_sm_released {
363 + /* pointer to the VC (ie physical) address of the allocated memory */
364 + uint32_t addr;
365 + /* size of buffer */
366 + uint32_t size;
367 + /* opaque handle returned in RELEASED messages */
368 + int32_t kernel_id;
369 +};
370 +
371 +/* Union of ALL messages */
372 +union vc_sm_msg_union_t {
373 + struct vc_sm_alloc_t alloc;
374 + struct vc_sm_alloc_result_t alloc_result;
375 + struct vc_sm_free_t free;
376 + struct vc_sm_lock_unlock_t lock_unlock;
377 + struct vc_sm_action_clean_t action_clean;
378 + struct vc_sm_resize_t resize;
379 + struct vc_sm_lock_result_t lock_result;
380 + struct vc_sm_result_t result;
381 + struct vc_sm_free_all_t free_all;
382 + struct vc_sm_import import;
383 + struct vc_sm_import_result import_result;
384 + struct vc_sm_released released;
385 +};
386 +
387 +#endif /* __VC_SM_DEFS_H__INCLUDED__ */
388 --- /dev/null
389 +++ b/drivers/char/broadcom/vc_sm/vc_sm_knl.h
390 @@ -0,0 +1,58 @@
391 +/*
392 + ****************************************************************************
393 + * Copyright 2011 Broadcom Corporation. All rights reserved.
394 + *
395 + * Unless you and Broadcom execute a separate written software license
396 + * agreement governing use of this software, this software is licensed to you
397 + * under the terms of the GNU General Public License version 2, available at
398 + * http://www.broadcom.com/licenses/GPLv2.php (the "GPL").
399 + *
400 + * Notwithstanding the above, under no circumstances may you combine this
401 + * software in any way with any other Broadcom software provided under a
402 + * license other than the GPL, without Broadcom's express prior written
403 + * consent.
404 + ****************************************************************************
405 + */
406 +
407 +#ifndef __VC_SM_KNL_H__INCLUDED__
408 +#define __VC_SM_KNL_H__INCLUDED__
409 +
410 +#if !defined(__KERNEL__)
411 +#error "This interface is for kernel use only..."
412 +#endif
413 +
414 +/* Type of memory to be locked (ie mapped) */
415 +enum vc_sm_lock_cache_mode {
416 + VC_SM_LOCK_CACHED,
417 + VC_SM_LOCK_NON_CACHED,
418 +};
419 +
420 +/* Cache functions */
421 +#define VCSM_CACHE_OP_INV 0x01
422 +#define VCSM_CACHE_OP_CLEAN 0x02
423 +#define VCSM_CACHE_OP_FLUSH 0x03
424 +
425 +/* Allocate a shared memory handle and block. */
426 +int vc_sm_alloc(struct vc_sm_alloc_t *alloc, int *handle);
427 +
428 +/* Free a previously allocated shared memory handle and block. */
429 +int vc_sm_free(int handle);
430 +
431 +/* Lock a memory handle for use by kernel. */
432 +int vc_sm_lock(int handle, enum vc_sm_lock_cache_mode mode,
433 + unsigned long *data);
434 +
435 +/* Unlock a memory handle in use by kernel. */
436 +int vc_sm_unlock(int handle, int flush, int no_vc_unlock);
437 +
438 +/* Get an internal resource handle mapped from the external one. */
439 +int vc_sm_int_handle(int handle);
440 +
441 +/* Map a shared memory region for use by kernel. */
442 +int vc_sm_map(int handle, unsigned int sm_addr,
443 + enum vc_sm_lock_cache_mode mode, unsigned long *data);
444 +
445 +/* Import a block of memory into the GPU space. */
446 +int vc_sm_import_dmabuf(struct dma_buf *dmabuf, int *handle);
447 +
448 +#endif /* __VC_SM_KNL_H__INCLUDED__ */
449 --- /dev/null
450 +++ b/drivers/char/broadcom/vc_sm/vc_vchi_sm.c
451 @@ -0,0 +1,516 @@
452 +/*
453 + ****************************************************************************
454 + * Copyright 2011-2012 Broadcom Corporation. All rights reserved.
455 + *
456 + * Unless you and Broadcom execute a separate written software license
457 + * agreement governing use of this software, this software is licensed to you
458 + * under the terms of the GNU General Public License version 2, available at
459 + * http://www.broadcom.com/licenses/GPLv2.php (the "GPL").
460 + *
461 + * Notwithstanding the above, under no circumstances may you combine this
462 + * software in any way with any other Broadcom software provided under a
463 + * license other than the GPL, without Broadcom's express prior written
464 + * consent.
465 + ****************************************************************************
466 + */
467 +
468 +/* ---- Include Files ----------------------------------------------------- */
469 +#include <linux/types.h>
470 +#include <linux/kernel.h>
471 +#include <linux/list.h>
472 +#include <linux/semaphore.h>
473 +#include <linux/mutex.h>
474 +#include <linux/slab.h>
475 +#include <linux/kthread.h>
476 +
477 +#include "vc_vchi_sm.h"
478 +
479 +#define VC_SM_VER 1
480 +#define VC_SM_MIN_VER 0
481 +
482 +/* ---- Private Constants and Types -------------------------------------- */
483 +
484 +/* Command blocks come from a pool */
485 +#define SM_MAX_NUM_CMD_RSP_BLKS 32
486 +
487 +struct sm_cmd_rsp_blk {
488 + struct list_head head; /* To create lists */
489 + struct semaphore sema; /* To be signaled when the response is there */
490 +
491 + uint16_t id;
492 + uint16_t length;
493 +
494 + uint8_t msg[VC_SM_MAX_MSG_LEN];
495 +
496 + uint32_t wait:1;
497 + uint32_t sent:1;
498 + uint32_t alloc:1;
499 +
500 +};
501 +
502 +struct sm_instance {
503 + uint32_t num_connections;
504 + VCHI_SERVICE_HANDLE_T vchi_handle[VCHI_MAX_NUM_CONNECTIONS];
505 + struct task_struct *io_thread;
506 + struct semaphore io_sema;
507 +
508 + uint32_t trans_id;
509 +
510 + struct mutex lock;
511 + struct list_head cmd_list;
512 + struct list_head rsp_list;
513 + struct list_head dead_list;
514 +
515 + struct sm_cmd_rsp_blk free_blk[SM_MAX_NUM_CMD_RSP_BLKS];
516 + struct list_head free_list;
517 + struct mutex free_lock;
518 + struct semaphore free_sema;
519 +
520 +};
521 +
522 +/* ---- Private Variables ------------------------------------------------ */
523 +
524 +/* ---- Private Function Prototypes -------------------------------------- */
525 +
526 +/* ---- Private Functions ------------------------------------------------ */
527 +static int
528 +bcm2835_vchi_msg_queue(VCHI_SERVICE_HANDLE_T handle,
529 + void *data,
530 + unsigned int size)
531 +{
532 + return vchi_queue_kernel_message(handle,
533 + data,
534 + size);
535 +}
536 +
537 +static struct
538 +sm_cmd_rsp_blk *vc_vchi_cmd_create(struct sm_instance *instance,
539 + enum vc_sm_msg_type id, void *msg,
540 + uint32_t size, int wait)
541 +{
542 + struct sm_cmd_rsp_blk *blk;
543 + struct vc_sm_msg_hdr_t *hdr;
544 +
545 + if (down_interruptible(&instance->free_sema)) {
546 + blk = kmalloc(sizeof(*blk), GFP_KERNEL);
547 + if (!blk)
548 + return NULL;
549 +
550 + blk->alloc = 1;
551 + sema_init(&blk->sema, 0);
552 + } else {
553 + mutex_lock(&instance->free_lock);
554 + blk =
555 + list_first_entry(&instance->free_list,
556 + struct sm_cmd_rsp_blk, head);
557 + list_del(&blk->head);
558 + mutex_unlock(&instance->free_lock);
559 + }
560 +
561 + blk->sent = 0;
562 + blk->wait = wait;
563 + blk->length = sizeof(*hdr) + size;
564 +
565 + hdr = (struct vc_sm_msg_hdr_t *) blk->msg;
566 + hdr->type = id;
567 + mutex_lock(&instance->lock);
568 + hdr->trans_id = blk->id = ++instance->trans_id;
569 + mutex_unlock(&instance->lock);
570 +
571 + if (size)
572 + memcpy(hdr->body, msg, size);
573 +
574 + return blk;
575 +}
576 +
577 +static void
578 +vc_vchi_cmd_delete(struct sm_instance *instance, struct sm_cmd_rsp_blk *blk)
579 +{
580 + if (blk->alloc) {
581 + kfree(blk);
582 + return;
583 + }
584 +
585 + mutex_lock(&instance->free_lock);
586 + list_add(&blk->head, &instance->free_list);
587 + mutex_unlock(&instance->free_lock);
588 + up(&instance->free_sema);
589 +}
590 +
591 +static int vc_vchi_sm_videocore_io(void *arg)
592 +{
593 + struct sm_instance *instance = arg;
594 + struct sm_cmd_rsp_blk *cmd = NULL, *cmd_tmp;
595 + struct vc_sm_result_t *reply;
596 + uint32_t reply_len;
597 + int32_t status;
598 + int svc_use = 1;
599 +
600 + while (1) {
601 + if (svc_use)
602 + vchi_service_release(instance->vchi_handle[0]);
603 + svc_use = 0;
604 + if (!down_interruptible(&instance->io_sema)) {
605 + vchi_service_use(instance->vchi_handle[0]);
606 + svc_use = 1;
607 +
608 + do {
609 + /*
610 + * Get new command and move it to response list
611 + */
612 + mutex_lock(&instance->lock);
613 + if (list_empty(&instance->cmd_list)) {
614 + /* no more commands to process */
615 + mutex_unlock(&instance->lock);
616 + break;
617 + }
618 + cmd =
619 + list_first_entry(&instance->cmd_list,
620 + struct sm_cmd_rsp_blk,
621 + head);
622 + list_move(&cmd->head, &instance->rsp_list);
623 + cmd->sent = 1;
624 + mutex_unlock(&instance->lock);
625 +
626 + /* Send the command */
627 + status = bcm2835_vchi_msg_queue(
628 + instance->vchi_handle[0],
629 + cmd->msg, cmd->length);
630 + if (status) {
631 + pr_err("%s: failed to queue message (%d)",
632 + __func__, status);
633 + }
634 +
635 + /* If no reply is needed then we're done */
636 + if (!cmd->wait) {
637 + mutex_lock(&instance->lock);
638 + list_del(&cmd->head);
639 + mutex_unlock(&instance->lock);
640 + vc_vchi_cmd_delete(instance, cmd);
641 + continue;
642 + }
643 +
644 + if (status) {
645 + up(&cmd->sema);
646 + continue;
647 + }
648 +
649 + } while (1);
650 +
651 + while (!vchi_msg_peek
652 + (instance->vchi_handle[0], (void **)&reply,
653 + &reply_len, VCHI_FLAGS_NONE)) {
654 + mutex_lock(&instance->lock);
655 + list_for_each_entry(cmd, &instance->rsp_list,
656 + head) {
657 + if (cmd->id == reply->trans_id)
658 + break;
659 + }
660 + mutex_unlock(&instance->lock);
661 +
662 + if (&cmd->head == &instance->rsp_list) {
663 + pr_debug("%s: received response %u, throw away...",
664 + __func__, reply->trans_id);
665 + } else if (reply_len > sizeof(cmd->msg)) {
666 + pr_err("%s: reply too big (%u) %u, throw away...",
667 + __func__, reply_len,
668 + reply->trans_id);
669 + } else {
670 + memcpy(cmd->msg, reply, reply_len);
671 + up(&cmd->sema);
672 + }
673 +
674 + vchi_msg_remove(instance->vchi_handle[0]);
675 + }
676 +
677 + /* Go through the dead list and free them */
678 + mutex_lock(&instance->lock);
679 + list_for_each_entry_safe(cmd, cmd_tmp,
680 + &instance->dead_list, head) {
681 + list_del(&cmd->head);
682 + vc_vchi_cmd_delete(instance, cmd);
683 + }
684 + mutex_unlock(&instance->lock);
685 + }
686 + }
687 +
688 + return 0;
689 +}
690 +
691 +static void vc_sm_vchi_callback(void *param,
692 + const VCHI_CALLBACK_REASON_T reason,
693 + void *msg_handle)
694 +{
695 + struct sm_instance *instance = param;
696 +
697 + (void)msg_handle;
698 +
699 + switch (reason) {
700 + case VCHI_CALLBACK_MSG_AVAILABLE:
701 + up(&instance->io_sema);
702 + break;
703 +
704 + case VCHI_CALLBACK_SERVICE_CLOSED:
705 + pr_info("%s: service CLOSED!!", __func__);
706 + default:
707 + break;
708 + }
709 +}
710 +
711 +struct sm_instance *vc_vchi_sm_init(VCHI_INSTANCE_T vchi_instance,
712 + VCHI_CONNECTION_T **vchi_connections,
713 + uint32_t num_connections)
714 +{
715 + uint32_t i;
716 + struct sm_instance *instance;
717 + int status;
718 +
719 + pr_debug("%s: start", __func__);
720 +
721 + if (num_connections > VCHI_MAX_NUM_CONNECTIONS) {
722 + pr_err("%s: unsupported number of connections %u (max=%u)",
723 + __func__, num_connections, VCHI_MAX_NUM_CONNECTIONS);
724 +
725 + goto err_null;
726 + }
727 + /* Allocate memory for this instance */
728 + instance = kzalloc(sizeof(*instance), GFP_KERNEL);
729 +
730 + /* Misc initialisations */
731 + mutex_init(&instance->lock);
732 + sema_init(&instance->io_sema, 0);
733 + INIT_LIST_HEAD(&instance->cmd_list);
734 + INIT_LIST_HEAD(&instance->rsp_list);
735 + INIT_LIST_HEAD(&instance->dead_list);
736 + INIT_LIST_HEAD(&instance->free_list);
737 + sema_init(&instance->free_sema, SM_MAX_NUM_CMD_RSP_BLKS);
738 + mutex_init(&instance->free_lock);
739 + for (i = 0; i < SM_MAX_NUM_CMD_RSP_BLKS; i++) {
740 + sema_init(&instance->free_blk[i].sema, 0);
741 + list_add(&instance->free_blk[i].head, &instance->free_list);
742 + }
743 +
744 + /* Open the VCHI service connections */
745 + instance->num_connections = num_connections;
746 + for (i = 0; i < num_connections; i++) {
747 + SERVICE_CREATION_T params = {
748 + VCHI_VERSION_EX(VC_SM_VER, VC_SM_MIN_VER),
749 + VC_SM_SERVER_NAME,
750 + vchi_connections[i],
751 + 0,
752 + 0,
753 + vc_sm_vchi_callback,
754 + instance,
755 + 0,
756 + 0,
757 + 0,
758 + };
759 +
760 + status = vchi_service_open(vchi_instance,
761 + &params, &instance->vchi_handle[i]);
762 + if (status) {
763 + pr_err("%s: failed to open VCHI service (%d)",
764 + __func__, status);
765 +
766 + goto err_close_services;
767 + }
768 + }
769 +
770 +	/* Create the thread which takes care of all I/O to/from videocore. */
771 + instance->io_thread = kthread_create(&vc_vchi_sm_videocore_io,
772 + (void *)instance, "SMIO");
773 + if (instance->io_thread == NULL) {
774 + pr_err("%s: failed to create SMIO thread", __func__);
775 +
776 + goto err_close_services;
777 + }
778 + set_user_nice(instance->io_thread, -10);
779 + wake_up_process(instance->io_thread);
780 +
781 + pr_debug("%s: success - instance 0x%x", __func__,
782 + (unsigned int)instance);
783 + return instance;
784 +
785 +err_close_services:
786 + for (i = 0; i < instance->num_connections; i++) {
787 + if (instance->vchi_handle[i] != NULL)
788 + vchi_service_close(instance->vchi_handle[i]);
789 + }
790 + kfree(instance);
791 +err_null:
792 + pr_debug("%s: FAILED", __func__);
793 + return NULL;
794 +}
795 +
796 +int vc_vchi_sm_stop(struct sm_instance **handle)
797 +{
798 + struct sm_instance *instance;
799 + uint32_t i;
800 +
801 + if (handle == NULL) {
802 + pr_err("%s: invalid pointer to handle %p", __func__, handle);
803 + goto lock;
804 + }
805 +
806 + if (*handle == NULL) {
807 + pr_err("%s: invalid handle %p", __func__, *handle);
808 + goto lock;
809 + }
810 +
811 + instance = *handle;
812 +
813 + /* Close all VCHI service connections */
814 + for (i = 0; i < instance->num_connections; i++) {
815 + int32_t success;
816 +
817 + vchi_service_use(instance->vchi_handle[i]);
818 +
819 + success = vchi_service_close(instance->vchi_handle[i]);
820 + }
821 +
822 + kfree(instance);
823 +
824 + *handle = NULL;
825 + return 0;
826 +
827 +lock:
828 + return -EINVAL;
829 +}
830 +
831 +int vc_vchi_sm_send_msg(struct sm_instance *handle,
832 + enum vc_sm_msg_type msg_id,
833 + void *msg, uint32_t msg_size,
834 + void *result, uint32_t result_size,
835 + uint32_t *cur_trans_id, uint8_t wait_reply)
836 +{
837 + int status = 0;
838 + struct sm_instance *instance = handle;
839 + struct sm_cmd_rsp_blk *cmd_blk;
840 +
841 + if (handle == NULL) {
842 + pr_err("%s: invalid handle", __func__);
843 + return -EINVAL;
844 + }
845 + if (msg == NULL) {
846 + pr_err("%s: invalid msg pointer", __func__);
847 + return -EINVAL;
848 + }
849 +
850 + cmd_blk =
851 + vc_vchi_cmd_create(instance, msg_id, msg, msg_size, wait_reply);
852 + if (cmd_blk == NULL) {
853 + pr_err("[%s]: failed to allocate global tracking resource",
854 + __func__);
855 + return -ENOMEM;
856 + }
857 +
858 + if (cur_trans_id != NULL)
859 + *cur_trans_id = cmd_blk->id;
860 +
861 + mutex_lock(&instance->lock);
862 + list_add_tail(&cmd_blk->head, &instance->cmd_list);
863 + mutex_unlock(&instance->lock);
864 + up(&instance->io_sema);
865 +
866 + if (!wait_reply)
867 + /* We're done */
868 + return 0;
869 +
870 + /* Wait for the response */
871 + if (down_interruptible(&cmd_blk->sema)) {
872 + mutex_lock(&instance->lock);
873 + if (!cmd_blk->sent) {
874 + list_del(&cmd_blk->head);
875 + mutex_unlock(&instance->lock);
876 + vc_vchi_cmd_delete(instance, cmd_blk);
877 + return -ENXIO;
878 + }
879 + mutex_unlock(&instance->lock);
880 +
881 + mutex_lock(&instance->lock);
882 + list_move(&cmd_blk->head, &instance->dead_list);
883 + mutex_unlock(&instance->lock);
884 + up(&instance->io_sema);
885 + return -EINTR; /* We're done */
886 + }
887 +
888 + if (result && result_size) {
889 + memcpy(result, cmd_blk->msg, result_size);
890 + } else {
891 + struct vc_sm_result_t *res =
892 + (struct vc_sm_result_t *) cmd_blk->msg;
893 + status = (res->success == 0) ? 0 : -ENXIO;
894 + }
895 +
896 + mutex_lock(&instance->lock);
897 + list_del(&cmd_blk->head);
898 + mutex_unlock(&instance->lock);
899 + vc_vchi_cmd_delete(instance, cmd_blk);
900 + return status;
901 +}
902 +
903 +int vc_vchi_sm_alloc(struct sm_instance *handle, struct vc_sm_alloc_t *msg,
904 + struct vc_sm_alloc_result_t *result,
905 + uint32_t *cur_trans_id)
906 +{
907 + return vc_vchi_sm_send_msg(handle, VC_SM_MSG_TYPE_ALLOC,
908 + msg, sizeof(*msg), result, sizeof(*result),
909 + cur_trans_id, 1);
910 +}
911 +
912 +int vc_vchi_sm_free(struct sm_instance *handle,
913 + struct vc_sm_free_t *msg, uint32_t *cur_trans_id)
914 +{
915 + return vc_vchi_sm_send_msg(handle, VC_SM_MSG_TYPE_FREE,
916 + msg, sizeof(*msg), 0, 0, cur_trans_id, 0);
917 +}
918 +
919 +int vc_vchi_sm_lock(struct sm_instance *handle,
920 + struct vc_sm_lock_unlock_t *msg,
921 + struct vc_sm_lock_result_t *result,
922 + uint32_t *cur_trans_id)
923 +{
924 + return vc_vchi_sm_send_msg(handle, VC_SM_MSG_TYPE_LOCK,
925 + msg, sizeof(*msg), result, sizeof(*result),
926 + cur_trans_id, 1);
927 +}
928 +
929 +int vc_vchi_sm_unlock(struct sm_instance *handle,
930 + struct vc_sm_lock_unlock_t *msg,
931 + uint32_t *cur_trans_id, uint8_t wait_reply)
932 +{
933 + return vc_vchi_sm_send_msg(handle, wait_reply ?
934 + VC_SM_MSG_TYPE_UNLOCK :
935 + VC_SM_MSG_TYPE_UNLOCK_NOANS, msg,
936 + sizeof(*msg), 0, 0, cur_trans_id,
937 + wait_reply);
938 +}
939 +
940 +int vc_vchi_sm_resize(struct sm_instance *handle, struct vc_sm_resize_t *msg,
941 + uint32_t *cur_trans_id)
942 +{
943 + return vc_vchi_sm_send_msg(handle, VC_SM_MSG_TYPE_RESIZE,
944 + msg, sizeof(*msg), 0, 0, cur_trans_id, 1);
945 +}
946 +
947 +int vc_vchi_sm_walk_alloc(struct sm_instance *handle)
948 +{
949 + return vc_vchi_sm_send_msg(handle, VC_SM_MSG_TYPE_WALK_ALLOC,
950 + 0, 0, 0, 0, 0, 0);
951 +}
952 +
953 +int vc_vchi_sm_clean_up(struct sm_instance *handle,
954 + struct vc_sm_action_clean_t *msg)
955 +{
956 + return vc_vchi_sm_send_msg(handle, VC_SM_MSG_TYPE_ACTION_CLEAN,
957 + msg, sizeof(*msg), 0, 0, 0, 0);
958 +}
959 +
960 +int vc_vchi_sm_import(struct sm_instance *handle, struct vc_sm_import *msg,
961 + struct vc_sm_import_result *result,
962 + uint32_t *cur_trans_id)
963 +{
964 + return vc_vchi_sm_send_msg(handle, VC_SM_MSG_TYPE_IMPORT,
965 + msg, sizeof(*msg), result, sizeof(*result),
966 + cur_trans_id, 1);
967 +}
968 --- /dev/null
969 +++ b/drivers/char/broadcom/vc_sm/vc_vchi_sm.h
970 @@ -0,0 +1,102 @@
971 +/*
972 + ****************************************************************************
973 + * Copyright 2011 Broadcom Corporation. All rights reserved.
974 + *
975 + * Unless you and Broadcom execute a separate written software license
976 + * agreement governing use of this software, this software is licensed to you
977 + * under the terms of the GNU General Public License version 2, available at
978 + * http://www.broadcom.com/licenses/GPLv2.php (the "GPL").
979 + *
980 + * Notwithstanding the above, under no circumstances may you combine this
981 + * software in any way with any other Broadcom software provided under a
982 + * license other than the GPL, without Broadcom's express prior written
983 + * consent.
984 + ****************************************************************************
985 + */
986 +
987 +#ifndef __VC_VCHI_SM_H__INCLUDED__
988 +#define __VC_VCHI_SM_H__INCLUDED__
989 +
990 +#include "interface/vchi/vchi.h"
991 +
992 +#include "vc_sm_defs.h"
993 +
994 +/*
995 + * Forward declare.
996 + */
997 +struct sm_instance;
998 +
999 +/*
1000 + * Initialize the shared memory service and open the VCHI connection used to talk to it.
1001 + */
1002 +struct sm_instance *vc_vchi_sm_init(VCHI_INSTANCE_T vchi_instance,
1003 + VCHI_CONNECTION_T **vchi_connections,
1004 + uint32_t num_connections);
1005 +
1006 +/*
1007 + * Terminates the shared memory service.
1008 + */
1009 +int vc_vchi_sm_stop(struct sm_instance **handle);
1010 +
1011 +/*
1012 + * Ask the shared memory service to allocate some memory on videocore and
1013 + * return the result of this allocation (which upon success will be a pointer
1014 + * to some memory in videocore space).
1015 + */
1016 +int vc_vchi_sm_alloc(struct sm_instance *handle, struct vc_sm_alloc_t *alloc,
1017 + struct vc_sm_alloc_result_t *alloc_result,
1018 + uint32_t *trans_id);
1019 +
1020 +/*
1021 + * Ask the shared memory service to free up some memory that was previously
1022 + * allocated by the vc_vchi_sm_alloc function call.
1023 + */
1024 +int vc_vchi_sm_free(struct sm_instance *handle,
1025 + struct vc_sm_free_t *free, uint32_t *trans_id);
1026 +
1027 +/*
1028 + * Ask the shared memory service to lock some memory that was previously
1029 + * allocated by the vc_vchi_sm_alloc function call.
1030 + */
1031 +int vc_vchi_sm_lock(struct sm_instance *handle,
1032 + struct vc_sm_lock_unlock_t *lock_unlock,
1033 + struct vc_sm_lock_result_t *lock_result,
1034 + uint32_t *trans_id);
1035 +
1036 +/*
1037 + * Ask the shared memory service to unlock some memory that was previously
1038 + * allocated by the vc_vchi_sm_alloc function call.
1039 + */
1040 +int vc_vchi_sm_unlock(struct sm_instance *handle,
1041 + struct vc_sm_lock_unlock_t *lock_unlock,
1042 + uint32_t *trans_id, uint8_t wait_reply);
1043 +
1044 +/*
1045 + * Ask the shared memory service to resize some memory that was previously
1046 + * allocated by the vc_vchi_sm_alloc function call.
1047 + */
1048 +int vc_vchi_sm_resize(struct sm_instance *handle,
1049 + struct vc_sm_resize_t *resize, uint32_t *trans_id);
1050 +
1051 +/*
1052 + * Walk the allocated resources on the videocore side; the allocations will
1053 + * show up in the log. This is purely for debug/information and takes no
1054 + * specific actions.
1055 + */
1056 +int vc_vchi_sm_walk_alloc(struct sm_instance *handle);
1057 +
1058 +/*
1059 + * Clean up following a previously interrupted action which left the system
1060 + * in a bad state of some sort.
1061 + */
1062 +int vc_vchi_sm_clean_up(struct sm_instance *handle,
1063 + struct vc_sm_action_clean_t *action_clean);
1064 +
1065 +/*
1066 + * Import a contiguous block of memory and wrap it in a GPU MEM_HANDLE_T.
1067 + */
1068 +int vc_vchi_sm_import(struct sm_instance *handle, struct vc_sm_import *msg,
1069 + struct vc_sm_import_result *result,
1070 + uint32_t *cur_trans_id);
1071 +
1072 +#endif /* __VC_VCHI_SM_H__INCLUDED__ */
1073 --- /dev/null
1074 +++ b/drivers/char/broadcom/vc_sm/vmcs_sm.c
1075 @@ -0,0 +1,3493 @@
1076 +/*
1077 + ****************************************************************************
1078 + * Copyright 2011-2012 Broadcom Corporation. All rights reserved.
1079 + *
1080 + * Unless you and Broadcom execute a separate written software license
1081 + * agreement governing use of this software, this software is licensed to you
1082 + * under the terms of the GNU General Public License version 2, available at
1083 + * http://www.broadcom.com/licenses/GPLv2.php (the "GPL").
1084 + *
1085 + * Notwithstanding the above, under no circumstances may you combine this
1086 + * software in any way with any other Broadcom software provided under a
1087 + * license other than the GPL, without Broadcom's express prior written
1088 + * consent.
1089 + ****************************************************************************
1090 + */
1091 +
1092 +/* ---- Include Files ----------------------------------------------------- */
1093 +
1094 +#include <linux/cdev.h>
1095 +#include <linux/broadcom/vc_mem.h>
1096 +#include <linux/device.h>
1097 +#include <linux/debugfs.h>
1098 +#include <linux/dma-mapping.h>
1099 +#include <linux/dma-buf.h>
1100 +#include <linux/errno.h>
1101 +#include <linux/fs.h>
1102 +#include <linux/hugetlb.h>
1103 +#include <linux/ioctl.h>
1104 +#include <linux/kernel.h>
1105 +#include <linux/list.h>
1106 +#include <linux/module.h>
1107 +#include <linux/mm.h>
1108 +#include <linux/of.h>
1109 +#include <linux/platform_device.h>
1110 +#include <linux/pfn.h>
1111 +#include <linux/proc_fs.h>
1112 +#include <linux/pagemap.h>
1113 +#include <linux/semaphore.h>
1114 +#include <linux/slab.h>
1115 +#include <linux/seq_file.h>
1116 +#include <linux/types.h>
1117 +#include <asm/cacheflush.h>
1118 +
1119 +#include "vchiq_connected.h"
1120 +#include "vc_vchi_sm.h"
1121 +
1122 +#include <linux/broadcom/vmcs_sm_ioctl.h>
1123 +#include "vc_sm_knl.h"
1124 +
1125 +/* ---- Private Constants and Types --------------------------------------- */
1126 +
1127 +#define DEVICE_NAME "vcsm"
1128 +#define DRIVER_NAME "bcm2835-vcsm"
1129 +#define DEVICE_MINOR 0
1130 +
1131 +#define VC_SM_DIR_ROOT_NAME "vc-smem"
1132 +#define VC_SM_DIR_ALLOC_NAME "alloc"
1133 +#define VC_SM_STATE "state"
1134 +#define VC_SM_STATS "statistics"
1135 +#define VC_SM_RESOURCES "resources"
1136 +#define VC_SM_DEBUG "debug"
1137 +#define VC_SM_WRITE_BUF_SIZE 128
1138 +
1139 +/* Statistics tracked per resource and globally. */
1140 +enum sm_stats_t {
1141 + /* Attempt. */
1142 + ALLOC,
1143 + FREE,
1144 + LOCK,
1145 + UNLOCK,
1146 + MAP,
1147 + FLUSH,
1148 + INVALID,
1149 + IMPORT,
1150 +
1151 + END_ATTEMPT,
1152 +
1153 + /* Failure. */
1154 + ALLOC_FAIL,
1155 + FREE_FAIL,
1156 + LOCK_FAIL,
1157 + UNLOCK_FAIL,
1158 + MAP_FAIL,
1159 + FLUSH_FAIL,
1160 + INVALID_FAIL,
1161 + IMPORT_FAIL,
1162 +
1163 + END_ALL,
1164 +
1165 +};
1166 +
1167 +static const char *const sm_stats_human_read[] = {
1168 + "Alloc",
1169 + "Free",
1170 + "Lock",
1171 + "Unlock",
1172 + "Map",
1173 + "Cache Flush",
1174 + "Cache Invalidate",
1175 + "Import",
1176 +};
1177 +
1178 +typedef int (*VC_SM_SHOW) (struct seq_file *s, void *v);
1179 +struct sm_pde_t {
1180 + VC_SM_SHOW show; /* Debug fs function hookup. */
1181 + struct dentry *dir_entry; /* Debug fs directory entry. */
1182 + void *priv_data; /* Private data */
1183 +
1184 +};
1185 +
1186 +/* Single resource allocation tracked for all devices. */
1187 +struct sm_mmap {
1188 + struct list_head map_list; /* Linked list of maps. */
1189 +
1190 + struct sm_resource_t *resource; /* Pointer to the resource. */
1191 +
1192 + pid_t res_pid; /* PID owning that resource. */
1193 + unsigned int res_vc_hdl; /* Resource handle (videocore). */
1194 + unsigned int res_usr_hdl; /* Resource handle (user). */
1195 +
1196 + unsigned long res_addr; /* Mapped virtual address. */
1197 + struct vm_area_struct *vma; /* VM area for this mapping. */
1198 + unsigned int ref_count; /* Reference count to this vma. */
1199 +
1200 + /* Used to link maps associated with a resource. */
1201 + struct list_head resource_map_list;
1202 +};
1203 +
1204 +/* Single resource allocation tracked for each opened device. */
1205 +struct sm_resource_t {
1206 + struct list_head resource_list; /* List of resources. */
1207 + struct list_head global_resource_list; /* Global list of resources. */
1208 +
1209 + pid_t pid; /* PID owning that resource. */
1210 + uint32_t res_guid; /* Unique identifier. */
1211 + uint32_t lock_count; /* Lock count for this resource. */
1212 + uint32_t ref_count; /* Ref count for this resource. */
1213 +
1214 + uint32_t res_handle; /* Resource allocation handle. */
1215 + void *res_base_mem; /* Resource base memory address. */
1216 + uint32_t res_size; /* Resource size allocated. */
1217 + enum vmcs_sm_cache_e res_cached; /* Resource cache type. */
1218 + struct sm_resource_t *res_shared; /* Shared resource */
1219 +
1220 + enum sm_stats_t res_stats[END_ALL]; /* Resource statistics. */
1221 +
1222 + uint8_t map_count; /* Counter of mappings for this resource. */
1223 + struct list_head map_list; /* Maps associated with a resource. */
1224 +
1225 + /* DMABUF related fields */
1226 + struct dma_buf *dma_buf;
1227 + struct dma_buf_attachment *attach;
1228 + struct sg_table *sgt;
1229 + dma_addr_t dma_addr;
1230 +
1231 + struct sm_priv_data_t *private;
1232 + bool map; /* whether to map pages up front */
1233 +};
1234 +
1235 +/* Private file data associated with each opened device. */
1236 +struct sm_priv_data_t {
1237 + struct list_head resource_list; /* List of resources. */
1238 +
1239 + pid_t pid; /* PID of creator. */
1240 +
1241 + struct dentry *dir_pid; /* Debug fs entries root. */
1242 + struct sm_pde_t dir_stats; /* Debug fs entries statistics sub-tree. */
1243 + struct sm_pde_t dir_res; /* Debug fs resource sub-tree. */
1244 +
1245 + int restart_sys; /* Tracks restart on interrupt. */
1246 + enum vc_sm_msg_type int_action; /* Interrupted action. */
1247 + uint32_t int_trans_id; /* Interrupted transaction. */
1248 +
1249 +};
1250 +
1251 +/* Global state information. */
1252 +struct sm_state_t {
1253 + struct platform_device *pdev;
1254 + struct sm_instance *sm_handle; /* Handle for videocore service. */
1255 + struct dentry *dir_root; /* Debug fs entries root. */
1256 + struct dentry *dir_alloc; /* Debug fs entries allocations. */
1257 + struct sm_pde_t dir_stats; /* Debug fs entries statistics sub-tree. */
1258 + struct sm_pde_t dir_state; /* Debug fs entries state sub-tree. */
1259 + struct dentry *debug; /* Debug fs entries debug. */
1260 +
1261 + struct mutex map_lock; /* Global map lock. */
1262 + struct list_head map_list; /* List of maps. */
1263 + struct list_head resource_list; /* List of resources. */
1264 +
1265 + enum sm_stats_t deceased[END_ALL]; /* Natural termination stats. */
1266 + enum sm_stats_t terminated[END_ALL]; /* Forced termination stats. */
1267 + uint32_t res_deceased_cnt; /* Natural termination counter. */
1268 + uint32_t res_terminated_cnt; /* Forced termination counter. */
1269 +
1270 + struct cdev sm_cdev; /* Device. */
1271 + dev_t sm_devid; /* Device identifier. */
1272 + struct class *sm_class; /* Class. */
1273 + struct device *sm_dev; /* Device. */
1274 +
1275 + struct sm_priv_data_t *data_knl; /* Kernel internal data tracking. */
1276 +
1277 + struct mutex lock; /* Global lock. */
1278 + uint32_t guid; /* GUID (next) tracker. */
1279 +
1280 +};
1281 +
1282 +/* ---- Private Variables ----------------------------------------------- */
1283 +
1284 +static struct sm_state_t *sm_state;
1285 +static int sm_inited;
1286 +
1287 +#if 0
1288 +static const char *const sm_cache_map_vector[] = {
1289 + "(null)",
1290 + "host",
1291 + "videocore",
1292 + "host+videocore",
1293 +};
1294 +#endif
1295 +
1296 +/* ---- Private Function Prototypes -------------------------------------- */
1297 +
1298 +/* ---- Private Functions ------------------------------------------------ */
1299 +
1300 +static inline unsigned int vcaddr_to_pfn(unsigned long vc_addr)
1301 +{
1302 + unsigned long pfn = vc_addr & 0x3FFFFFFF;
1303 +
1304 + pfn += mm_vc_mem_phys_addr;
1305 + pfn >>= PAGE_SHIFT;
1306 + return pfn;
1307 +}
1308 +
1309 +/*
1310 + * Carries over to the state statistics the statistics once owned by a deceased
1311 + * resource.
1312 + */
1313 +static void vc_sm_resource_deceased(struct sm_resource_t *p_res, int terminated)
1314 +{
1315 + if (sm_state != NULL) {
1316 + if (p_res != NULL) {
1317 + int ix;
1318 +
1319 + if (terminated)
1320 + sm_state->res_terminated_cnt++;
1321 + else
1322 + sm_state->res_deceased_cnt++;
1323 +
1324 + for (ix = 0; ix < END_ALL; ix++) {
1325 + if (terminated)
1326 + sm_state->terminated[ix] +=
1327 + p_res->res_stats[ix];
1328 + else
1329 + sm_state->deceased[ix] +=
1330 + p_res->res_stats[ix];
1331 + }
1332 + }
1333 + }
1334 +}
1335 +
1336 +/*
1337 + * Fetch a videocore handle corresponding to a mapping of the pid+address
1338 + * returns 0 (ie NULL) if no such handle exists in the global map.
1339 + */
1340 +static unsigned int vmcs_sm_vc_handle_from_pid_and_address(unsigned int pid,
1341 + unsigned int addr)
1342 +{
1343 + struct sm_mmap *map = NULL;
1344 + unsigned int handle = 0;
1345 +
1346 + if (!sm_state || addr == 0)
1347 + goto out;
1348 +
1349 + mutex_lock(&(sm_state->map_lock));
1350 +
1351 + /* Lookup the resource. */
1352 + if (!list_empty(&sm_state->map_list)) {
1353 + list_for_each_entry(map, &sm_state->map_list, map_list) {
1354 + if (map->res_pid != pid || map->res_addr != addr)
1355 + continue;
1356 +
1357 + pr_debug("[%s]: global map %p (pid %u, addr %lx) -> vc-hdl %x (usr-hdl %x)\n",
1358 + __func__, map, map->res_pid, map->res_addr,
1359 + map->res_vc_hdl, map->res_usr_hdl);
1360 +
1361 + handle = map->res_vc_hdl;
1362 + break;
1363 + }
1364 + }
1365 +
1366 + mutex_unlock(&(sm_state->map_lock));
1367 +
1368 +out:
1369 + /*
1370 + * Use a debug log here as it may be a valid situation that we query
1371 + * for something that is not mapped, we do not want a kernel log each
1372 + * time around.
1373 + *
1374 + * There are other error logs that would pop up accordingly if someone
1375 + * subsequently tries to use something invalid after being told not to
1376 + * use it...
1377 + */
1378 + if (handle == 0) {
1379 + pr_debug("[%s]: not a valid map (pid %u, addr %x)\n",
1380 + __func__, pid, addr);
1381 + }
1382 +
1383 + return handle;
1384 +}
1385 +
1386 +/*
1387 + * Fetch a user handle corresponding to a mapping of the pid+address
1388 + * returns 0 (ie NULL) if no such handle exists in the global map.
1389 + */
1390 +static unsigned int vmcs_sm_usr_handle_from_pid_and_address(unsigned int pid,
1391 + unsigned int addr)
1392 +{
1393 + struct sm_mmap *map = NULL;
1394 + unsigned int handle = 0;
1395 +
1396 + if (!sm_state || addr == 0)
1397 + goto out;
1398 +
1399 + mutex_lock(&(sm_state->map_lock));
1400 +
1401 + /* Lookup the resource. */
1402 + if (!list_empty(&sm_state->map_list)) {
1403 + list_for_each_entry(map, &sm_state->map_list, map_list) {
1404 + if (map->res_pid != pid || map->res_addr != addr)
1405 + continue;
1406 +
1407 + pr_debug("[%s]: global map %p (pid %u, addr %lx) -> usr-hdl %x (vc-hdl %x)\n",
1408 + __func__, map, map->res_pid, map->res_addr,
1409 + map->res_usr_hdl, map->res_vc_hdl);
1410 +
1411 + handle = map->res_usr_hdl;
1412 + break;
1413 + }
1414 + }
1415 +
1416 + mutex_unlock(&(sm_state->map_lock));
1417 +
1418 +out:
1419 + /*
1420 + * Use a debug log here as it may be a valid situation that we query
1421 + * for something that is not mapped yet.
1422 + *
1423 + * There are other error logs that would pop up accordingly if someone
1424 + * subsequently tries to use something invalid after being told not to
1425 + * use it...
1426 + */
1427 + if (handle == 0)
1428 + pr_debug("[%s]: not a valid map (pid %u, addr %x)\n",
1429 + __func__, pid, addr);
1430 +
1431 + return handle;
1432 +}
1433 +
1434 +#if defined(DO_NOT_USE)
1435 +/*
1436 + * Fetch an address corresponding to a mapping of the pid+handle
1437 + * returns 0 (ie NULL) if no such address exists in the global map.
1438 + */
1439 +static unsigned int vmcs_sm_usr_address_from_pid_and_vc_handle(unsigned int pid,
1440 + unsigned int hdl)
1441 +{
1442 + struct sm_mmap *map = NULL;
1443 + unsigned int addr = 0;
1444 +
1445 + if (sm_state == NULL || hdl == 0)
1446 + goto out;
1447 +
1448 + mutex_lock(&(sm_state->map_lock));
1449 +
1450 + /* Lookup the resource. */
1451 + if (!list_empty(&sm_state->map_list)) {
1452 + list_for_each_entry(map, &sm_state->map_list, map_list) {
1453 + if (map->res_pid != pid || map->res_vc_hdl != hdl)
1454 + continue;
1455 +
1456 + pr_debug("[%s]: global map %p (pid %u, vc-hdl %x, usr-hdl %x) -> addr %lx\n",
1457 + __func__, map, map->res_pid, map->res_vc_hdl,
1458 + map->res_usr_hdl, map->res_addr);
1459 +
1460 + addr = map->res_addr;
1461 + break;
1462 + }
1463 + }
1464 +
1465 + mutex_unlock(&(sm_state->map_lock));
1466 +
1467 +out:
1468 + /*
1469 + * Use a debug log here as it may be a valid situation that we query
1470 + * for something that is not mapped, we do not want a kernel log each
1471 + * time around.
1472 + *
1473 + * There are other error logs that would pop up accordingly if someone
1474 + * subsequently tries to use something invalid after being told not to
1475 + * use it...
1476 + */
1477 + if (addr == 0)
1478 + pr_debug("[%s]: not a valid map (pid %u, hdl %x)\n",
1479 + __func__, pid, hdl);
1480 +
1481 + return addr;
1482 +}
1483 +#endif
1484 +
1485 +/*
1486 + * Fetch an address corresponding to a mapping of the pid+handle
1487 + * returns 0 (ie NULL) if no such address exists in the global map.
1488 + */
1489 +static unsigned int vmcs_sm_usr_address_from_pid_and_usr_handle(unsigned int
1490 + pid,
1491 + unsigned int
1492 + hdl)
1493 +{
1494 + struct sm_mmap *map = NULL;
1495 + unsigned int addr = 0;
1496 +
1497 + if (sm_state == NULL || hdl == 0)
1498 + goto out;
1499 +
1500 + mutex_lock(&(sm_state->map_lock));
1501 +
1502 + /* Lookup the resource. */
1503 + if (!list_empty(&sm_state->map_list)) {
1504 + list_for_each_entry(map, &sm_state->map_list, map_list) {
1505 + if (map->res_pid != pid || map->res_usr_hdl != hdl)
1506 + continue;
1507 +
1508 + pr_debug("[%s]: global map %p (pid %u, vc-hdl %x, usr-hdl %x) -> addr %lx\n",
1509 + __func__, map, map->res_pid, map->res_vc_hdl,
1510 + map->res_usr_hdl, map->res_addr);
1511 +
1512 + addr = map->res_addr;
1513 + break;
1514 + }
1515 + }
1516 +
1517 + mutex_unlock(&(sm_state->map_lock));
1518 +
1519 +out:
1520 + /*
1521 + * Use a debug log here as it may be a valid situation that we query
1522 + * for something that is not mapped, we do not want a kernel log each
1523 + * time around.
1524 + *
1525 + * There are other error logs that would pop up accordingly if someone
1526 + * subsequently tries to use something invalid after being told not to
1527 + * use it...
1528 + */
1529 + if (addr == 0)
1530 + pr_debug("[%s]: not a valid map (pid %u, hdl %x)\n", __func__,
1531 + pid, hdl);
1532 +
1533 + return addr;
1534 +}
1535 +
1536 +/* Adds a resource mapping to the global data list. */
1537 +static void vmcs_sm_add_map(struct sm_state_t *state,
1538 + struct sm_resource_t *resource, struct sm_mmap *map)
1539 +{
1540 + mutex_lock(&(state->map_lock));
1541 +
1542 + /* Add to the global list of mappings */
1543 + list_add(&map->map_list, &state->map_list);
1544 +
1545 + /* Add to the list of mappings for this resource */
1546 + list_add(&map->resource_map_list, &resource->map_list);
1547 + resource->map_count++;
1548 +
1549 + mutex_unlock(&(state->map_lock));
1550 +
1551 + pr_debug("[%s]: added map %p (pid %u, vc-hdl %x, usr-hdl %x, addr %lx)\n",
1552 + __func__, map, map->res_pid, map->res_vc_hdl,
1553 + map->res_usr_hdl, map->res_addr);
1554 +}
1555 +
1556 +/* Removes a resource mapping from the global data list. */
1557 +static void vmcs_sm_remove_map(struct sm_state_t *state,
1558 + struct sm_resource_t *resource,
1559 + struct sm_mmap *map)
1560 +{
1561 + mutex_lock(&(state->map_lock));
1562 +
1563 + /* Remove from the global list of mappings */
1564 + list_del(&map->map_list);
1565 +
1566 + /* Remove from the list of mapping for this resource */
1567 + list_del(&map->resource_map_list);
1568 + if (resource->map_count > 0)
1569 + resource->map_count--;
1570 +
1571 + mutex_unlock(&(state->map_lock));
1572 +
1573 + pr_debug("[%s]: removed map %p (pid %d, vc-hdl %x, usr-hdl %x, addr %lx)\n",
1574 + __func__, map, map->res_pid, map->res_vc_hdl, map->res_usr_hdl,
1575 + map->res_addr);
1576 +
1577 + kfree(map);
1578 +}
1579 +
1580 +/* Read callback for the global state proc entry. */
1581 +static int vc_sm_global_state_show(struct seq_file *s, void *v)
1582 +{
1583 + struct sm_mmap *map = NULL;
1584 + struct sm_resource_t *resource = NULL;
1585 + int map_count = 0;
1586 + int resource_count = 0;
1587 +
1588 + if (sm_state == NULL)
1589 + return 0;
1590 +
1591 + seq_printf(s, "\nVC-ServiceHandle 0x%x\n",
1592 + (unsigned int)sm_state->sm_handle);
1593 +
1594 + /* Log all applicable mapping(s). */
1595 +
1596 + mutex_lock(&(sm_state->map_lock));
1597 + seq_puts(s, "\nResources\n");
1598 + if (!list_empty(&sm_state->resource_list)) {
1599 + list_for_each_entry(resource, &sm_state->resource_list,
1600 + global_resource_list) {
1601 + resource_count++;
1602 +
1603 + seq_printf(s, "\nResource %p\n",
1604 + resource);
1605 + seq_printf(s, " PID %u\n",
1606 + resource->pid);
1607 + seq_printf(s, " RES_GUID 0x%x\n",
1608 + resource->res_guid);
1609 + seq_printf(s, " LOCK_COUNT %u\n",
1610 + resource->lock_count);
1611 + seq_printf(s, " REF_COUNT %u\n",
1612 + resource->ref_count);
1613 + seq_printf(s, " res_handle 0x%X\n",
1614 + resource->res_handle);
1615 + seq_printf(s, " res_base_mem %p\n",
1616 + resource->res_base_mem);
1617 + seq_printf(s, " SIZE %d\n",
1618 + resource->res_size);
1619 + seq_printf(s, " DMABUF %p\n",
1620 + resource->dma_buf);
1621 + seq_printf(s, " ATTACH %p\n",
1622 + resource->attach);
1623 + seq_printf(s, " SGT %p\n",
1624 + resource->sgt);
1625 + seq_printf(s, " DMA_ADDR %pad\n",
1626 + &resource->dma_addr);
1627 + }
1628 + }
1629 + seq_printf(s, "\n\nTotal resource count: %d\n\n", resource_count);
1630 +
1631 + seq_puts(s, "\nMappings\n");
1632 + if (!list_empty(&sm_state->map_list)) {
1633 + list_for_each_entry(map, &sm_state->map_list, map_list) {
1634 + map_count++;
1635 +
1636 + seq_printf(s, "\nMapping 0x%x\n",
1637 + (unsigned int)map);
1638 + seq_printf(s, " TGID %u\n",
1639 + map->res_pid);
1640 + seq_printf(s, " VC-HDL 0x%x\n",
1641 + map->res_vc_hdl);
1642 + seq_printf(s, " USR-HDL 0x%x\n",
1643 + map->res_usr_hdl);
1644 + seq_printf(s, " USR-ADDR 0x%lx\n",
1645 + map->res_addr);
1646 + seq_printf(s, " SIZE %d\n",
1647 + map->resource->res_size);
1648 + }
1649 + }
1650 +
1651 + mutex_unlock(&(sm_state->map_lock));
1652 + seq_printf(s, "\n\nTotal map count: %d\n\n", map_count);
1653 +
1654 + return 0;
1655 +}
1656 +
1657 +static int vc_sm_global_statistics_show(struct seq_file *s, void *v)
1658 +{
1659 + int ix;
1660 +
1661 + /* Global state tracked statistics. */
1662 + if (sm_state != NULL) {
1663 + seq_puts(s, "\nDeceased Resources Statistics\n");
1664 +
1665 +		seq_printf(s, "\nNatural Cause (%u occurrences)\n",
1666 + sm_state->res_deceased_cnt);
1667 + for (ix = 0; ix < END_ATTEMPT; ix++) {
1668 + if (sm_state->deceased[ix] > 0) {
1669 + seq_printf(s, " %u\t%s\n",
1670 + sm_state->deceased[ix],
1671 + sm_stats_human_read[ix]);
1672 + }
1673 + }
1674 + seq_puts(s, "\n");
1675 + for (ix = 0; ix < END_ATTEMPT; ix++) {
1676 + if (sm_state->deceased[ix + END_ATTEMPT] > 0) {
1677 + seq_printf(s, " %u\tFAILED %s\n",
1678 + sm_state->deceased[ix + END_ATTEMPT],
1679 + sm_stats_human_read[ix]);
1680 + }
1681 + }
1682 +
1683 +		seq_printf(s, "\nForceful (%u occurrences)\n",
1684 + sm_state->res_terminated_cnt);
1685 + for (ix = 0; ix < END_ATTEMPT; ix++) {
1686 + if (sm_state->terminated[ix] > 0) {
1687 + seq_printf(s, " %u\t%s\n",
1688 + sm_state->terminated[ix],
1689 + sm_stats_human_read[ix]);
1690 + }
1691 + }
1692 + seq_puts(s, "\n");
1693 + for (ix = 0; ix < END_ATTEMPT; ix++) {
1694 + if (sm_state->terminated[ix + END_ATTEMPT] > 0) {
1695 + seq_printf(s, " %u\tFAILED %s\n",
1696 + sm_state->terminated[ix +
1697 + END_ATTEMPT],
1698 + sm_stats_human_read[ix]);
1699 + }
1700 + }
1701 + }
1702 +
1703 + return 0;
1704 +}
1705 +
1706 +#if 0
1707 +/* Read callback for the statistics proc entry. */
1708 +static int vc_sm_statistics_show(struct seq_file *s, void *v)
1709 +{
1710 + int ix;
1711 + struct sm_priv_data_t *file_data;
1712 + struct sm_resource_t *resource;
1713 + int res_count = 0;
1714 + struct sm_pde_t *p_pde;
1715 +
1716 + p_pde = (struct sm_pde_t *)(s->private);
1717 + file_data = (struct sm_priv_data_t *)(p_pde->priv_data);
1718 +
1719 + if (file_data == NULL)
1720 + return 0;
1721 +
1722 + /* Per process statistics. */
1723 +
1724 + seq_printf(s, "\nStatistics for TGID %d\n", file_data->pid);
1725 +
1726 + mutex_lock(&(sm_state->map_lock));
1727 +
1728 + if (!list_empty(&file_data->resource_list)) {
1729 + list_for_each_entry(resource, &file_data->resource_list,
1730 + resource_list) {
1731 + res_count++;
1732 +
1733 + seq_printf(s, "\nGUID: 0x%x\n\n",
1734 + resource->res_guid);
1735 + for (ix = 0; ix < END_ATTEMPT; ix++) {
1736 + if (resource->res_stats[ix] > 0) {
1737 + seq_printf(s,
1738 + " %u\t%s\n",
1739 + resource->res_stats[ix],
1740 + sm_stats_human_read[ix]);
1741 + }
1742 + }
1743 + seq_puts(s, "\n");
1744 + for (ix = 0; ix < END_ATTEMPT; ix++) {
1745 + if (resource->res_stats[ix + END_ATTEMPT] > 0) {
1746 + seq_printf(s,
1747 + " %u\tFAILED %s\n",
1748 + resource->res_stats[
1749 + ix + END_ATTEMPT],
1750 + sm_stats_human_read[ix]);
1751 + }
1752 + }
1753 + }
1754 + }
1755 +
1756 + mutex_unlock(&(sm_state->map_lock));
1757 +
1758 + seq_printf(s, "\nResources Count %d\n", res_count);
1759 +
1760 + return 0;
1761 +}
1762 +#endif
1763 +
1764 +#if 0
1765 +/* Read callback for the allocation proc entry. */
1766 +static int vc_sm_alloc_show(struct seq_file *s, void *v)
1767 +{
1768 + struct sm_priv_data_t *file_data;
1769 + struct sm_resource_t *resource;
1770 + int alloc_count = 0;
1771 + struct sm_pde_t *p_pde;
1772 +
1773 + p_pde = (struct sm_pde_t *)(s->private);
1774 + file_data = (struct sm_priv_data_t *)(p_pde->priv_data);
1775 +
1776 + if (!file_data)
1777 + return 0;
1778 +
1779 + /* Per process statistics. */
1780 + seq_printf(s, "\nAllocation for TGID %d\n", file_data->pid);
1781 +
1782 + mutex_lock(&(sm_state->map_lock));
1783 +
1784 + if (!list_empty(&file_data->resource_list)) {
1785 + list_for_each_entry(resource, &file_data->resource_list,
1786 + resource_list) {
1787 + alloc_count++;
1788 +
1789 + seq_printf(s, "\nGUID: 0x%x\n",
1790 + resource->res_guid);
1791 + seq_printf(s, "Lock Count: %u\n",
1792 + resource->lock_count);
1793 + seq_printf(s, "Mapped: %s\n",
1794 + (resource->map_count ? "yes" : "no"));
1795 + seq_printf(s, "VC-handle: 0x%x\n",
1796 + resource->res_handle);
1797 + seq_printf(s, "VC-address: 0x%p\n",
1798 + resource->res_base_mem);
1799 + seq_printf(s, "VC-size (bytes): %u\n",
1800 + resource->res_size);
1801 + seq_printf(s, "Cache: %s\n",
1802 + sm_cache_map_vector[resource->res_cached]);
1803 + }
1804 + }
1805 +
1806 + mutex_unlock(&(sm_state->map_lock));
1807 +
1808 + seq_printf(s, "\n\nTotal allocation count: %d\n\n", alloc_count);
1809 +
1810 + return 0;
1811 +}
1812 +#endif
1813 +
1814 +static int vc_sm_seq_file_show(struct seq_file *s, void *v)
1815 +{
1816 + struct sm_pde_t *sm_pde;
1817 +
1818 + sm_pde = (struct sm_pde_t *)(s->private);
1819 +
1820 + if (sm_pde && sm_pde->show)
1821 + sm_pde->show(s, v);
1822 +
1823 + return 0;
1824 +}
1825 +
1826 +static int vc_sm_single_open(struct inode *inode, struct file *file)
1827 +{
1828 + return single_open(file, vc_sm_seq_file_show, inode->i_private);
1829 +}
1830 +
1831 +static const struct file_operations vc_sm_debug_fs_fops = {
1832 + .open = vc_sm_single_open,
1833 + .read = seq_read,
1834 + .llseek = seq_lseek,
1835 + .release = single_release,
1836 +};
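+
+/*
+ * Each debugfs file carries a struct sm_pde_t whose ->show callback selects
+ * one of the dump routines above; vc_sm_seq_file_show() is the common
+ * seq_file entry point that dispatches through it.  A minimal sketch of
+ * wiring up one file (assuming a suitably populated sm_pde_t):
+ *
+ *	pde->show = &vc_sm_global_statistics_show;
+ *	debugfs_create_file("statistics", 0444, parent_dir, pde,
+ *			    &vc_sm_debug_fs_fops);
+ */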
1837 +
1838 +/*
1839 + * Adds a resource to the private data list which tracks all the allocated
1840 + * data.
1841 + */
1842 +static void vmcs_sm_add_resource(struct sm_priv_data_t *privdata,
1843 + struct sm_resource_t *resource)
1844 +{
1845 + mutex_lock(&(sm_state->map_lock));
1846 + list_add(&resource->resource_list, &privdata->resource_list);
1847 + list_add(&resource->global_resource_list, &sm_state->resource_list);
1848 + mutex_unlock(&(sm_state->map_lock));
1849 +
1850 + pr_debug("[%s]: added resource %p (base addr %p, hdl %x, size %u, cache %u)\n",
1851 + __func__, resource, resource->res_base_mem,
1852 + resource->res_handle, resource->res_size, resource->res_cached);
1853 +}
1854 +
1855 +/*
1856 + * Locates a resource and acquires a reference on it.
1857 + * The resource won't be deleted while there is a reference on it.
1858 + */
1859 +static struct sm_resource_t *vmcs_sm_acquire_resource(struct sm_priv_data_t
1860 + *private,
1861 + unsigned int res_guid)
1862 +{
1863 + struct sm_resource_t *resource, *ret = NULL;
1864 +
1865 + mutex_lock(&(sm_state->map_lock));
1866 +
1867 + list_for_each_entry(resource, &private->resource_list, resource_list) {
1868 + if (resource->res_guid != res_guid)
1869 + continue;
1870 +
1871 + pr_debug("[%s]: located resource %p (guid: %x, base addr %p, hdl %x, size %u, cache %u)\n",
1872 + __func__, resource, resource->res_guid,
1873 + resource->res_base_mem, resource->res_handle,
1874 + resource->res_size, resource->res_cached);
1875 + resource->ref_count++;
1876 + ret = resource;
1877 + break;
1878 + }
1879 +
1880 + mutex_unlock(&(sm_state->map_lock));
1881 +
1882 + return ret;
1883 +}
1884 +
1885 +/*
1886 + * Locates the first resource in the private list and acquires a reference.
1887 + * The resource won't be deleted while there is a reference on it.
1888 + */
1889 +static struct sm_resource_t *vmcs_sm_acquire_first_resource(
1890 + struct sm_priv_data_t *private)
1891 +{
1892 + struct sm_resource_t *resource, *ret = NULL;
1893 +
1894 + mutex_lock(&(sm_state->map_lock));
1895 +
1896 + list_for_each_entry(resource, &private->resource_list, resource_list) {
1897 + pr_debug("[%s]: located resource %p (guid: %x, base addr %p, hdl %x, size %u, cache %u)\n",
1898 + __func__, resource, resource->res_guid,
1899 + resource->res_base_mem, resource->res_handle,
1900 + resource->res_size, resource->res_cached);
1901 + resource->ref_count++;
1902 + ret = resource;
1903 + break;
1904 + }
1905 +
1906 + mutex_unlock(&(sm_state->map_lock));
1907 +
1908 + return ret;
1909 +}
1910 +
1911 +/*
1912 + * Locates a resource in the global list and acquires a reference on it.
1913 + * The resource won't be deleted while there is a reference on it.
1914 + */
1915 +static struct sm_resource_t *vmcs_sm_acquire_global_resource(unsigned int
1916 + res_guid)
1917 +{
1918 + struct sm_resource_t *resource, *ret = NULL;
1919 +
1920 + mutex_lock(&(sm_state->map_lock));
1921 +
1922 + list_for_each_entry(resource, &sm_state->resource_list,
1923 + global_resource_list) {
1924 + if (resource->res_guid != res_guid)
1925 + continue;
1926 +
1927 + pr_debug("[%s]: located resource %p (guid: %x, base addr %p, hdl %x, size %u, cache %u)\n",
1928 + __func__, resource, resource->res_guid,
1929 + resource->res_base_mem, resource->res_handle,
1930 + resource->res_size, resource->res_cached);
1931 + resource->ref_count++;
1932 + ret = resource;
1933 + break;
1934 + }
1935 +
1936 + mutex_unlock(&(sm_state->map_lock));
1937 +
1938 + return ret;
1939 +}
1940 +
1941 +/*
1942 + * Release a previously acquired resource.
1943 + * The resource will be deleted when its refcount reaches 0.
1944 + */
1945 +static void vmcs_sm_release_resource(struct sm_resource_t *resource, int force)
1946 +{
1947 + struct sm_priv_data_t *private = resource->private;
1948 + struct sm_mmap *map, *map_tmp;
1949 + struct sm_resource_t *res_tmp;
1950 + int ret;
1951 +
1952 + mutex_lock(&(sm_state->map_lock));
1953 +
1954 + if (--resource->ref_count) {
1955 + if (force)
1956 + pr_err("[%s]: resource %p in use\n", __func__, resource);
1957 +
1958 + mutex_unlock(&(sm_state->map_lock));
1959 + return;
1960 + }
1961 +
1962 + /* Time to free the resource. Start by removing it from the list */
1963 + list_del(&resource->resource_list);
1964 + list_del(&resource->global_resource_list);
1965 +
1966 + /*
1967 +	 * Walk the global resource list to see if another resource shares the
1968 +	 * videocore handle; if so, clear ours so the allocation isn't freed.
1969 + */
1970 + list_for_each_entry(res_tmp, &sm_state->resource_list,
1971 + global_resource_list) {
1972 + if (res_tmp->res_handle == resource->res_handle) {
1973 + resource->res_handle = 0;
1974 + break;
1975 + }
1976 + }
1977 +
1978 + mutex_unlock(&(sm_state->map_lock));
1979 +
1980 + pr_debug("[%s]: freeing data - guid %x, hdl %x, base address %p\n",
1981 + __func__, resource->res_guid, resource->res_handle,
1982 + resource->res_base_mem);
1983 + resource->res_stats[FREE]++;
1984 +
1985 + /* Make sure the resource we're removing is unmapped first */
1986 + if (resource->map_count && !list_empty(&resource->map_list)) {
1987 + down_write(&current->mm->mmap_sem);
1988 + list_for_each_entry_safe(map, map_tmp, &resource->map_list,
1989 + resource_map_list) {
1990 + ret =
1991 + do_munmap(current->mm, map->res_addr,
1992 + resource->res_size, NULL);
1993 + if (ret) {
1994 + pr_err("[%s]: could not unmap resource %p\n",
1995 + __func__, resource);
1996 + }
1997 + }
1998 + up_write(&current->mm->mmap_sem);
1999 + }
2000 +
2001 + /* Free up the videocore allocated resource. */
2002 + if (resource->res_handle) {
2003 + struct vc_sm_free_t free = {
2004 + resource->res_handle, (uint32_t)resource->res_base_mem
2005 + };
2006 + int status = vc_vchi_sm_free(sm_state->sm_handle, &free,
2007 + &private->int_trans_id);
2008 + if (status != 0 && status != -EINTR) {
2009 + pr_err("[%s]: failed to free memory on videocore (status: %u, trans_id: %u)\n",
2010 + __func__, status, private->int_trans_id);
2011 + resource->res_stats[FREE_FAIL]++;
2012 + ret = -EPERM;
2013 + }
2014 + }
2015 +
2016 + if (resource->sgt)
2017 + dma_buf_unmap_attachment(resource->attach, resource->sgt,
2018 + DMA_BIDIRECTIONAL);
2019 + if (resource->attach)
2020 + dma_buf_detach(resource->dma_buf, resource->attach);
2021 + if (resource->dma_buf)
2022 + dma_buf_put(resource->dma_buf);
2023 +
2024 + /* Free up the shared resource. */
2025 + if (resource->res_shared)
2026 + vmcs_sm_release_resource(resource->res_shared, 0);
2027 +
2028 + /* Free up the local resource tracking this allocation. */
2029 + vc_sm_resource_deceased(resource, force);
2030 + kfree(resource);
2031 +}
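+
+/*
+ * Reference pattern used throughout this file (illustrative sketch, not a
+ * new API): every vmcs_sm_acquire_*() call bumps ref_count, so each
+ * successful acquire must be paired with one vmcs_sm_release_resource():
+ *
+ *	resource = vmcs_sm_acquire_resource(private, guid);
+ *	if (resource) {
+ *		... use resource; it cannot be freed underneath us ...
+ *		vmcs_sm_release_resource(resource, 0);
+ *	}
+ */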
2032 +
2033 +/*
2034 + * Dump the map table for the driver. If pid is -1, dump the whole table;
2035 + * otherwise dump only the entries associated with the pid of interest.
2037 + */
2038 +static void vmcs_sm_host_walk_map_per_pid(int pid)
2039 +{
2040 + struct sm_mmap *map = NULL;
2041 +
2042 + /* Make sure the device was started properly. */
2043 + if (sm_state == NULL) {
2044 + pr_err("[%s]: invalid device\n", __func__);
2045 + return;
2046 + }
2047 +
2048 + mutex_lock(&(sm_state->map_lock));
2049 +
2050 + /* Log all applicable mapping(s). */
2051 + if (!list_empty(&sm_state->map_list)) {
2052 + list_for_each_entry(map, &sm_state->map_list, map_list) {
2053 + if (pid == -1 || map->res_pid == pid) {
2054 + pr_info("[%s]: tgid: %u - vc-hdl: %x, usr-hdl: %x, usr-addr: %lx\n",
2055 + __func__, map->res_pid, map->res_vc_hdl,
2056 + map->res_usr_hdl, map->res_addr);
2057 + }
2058 + }
2059 + }
2060 +
2061 + mutex_unlock(&(sm_state->map_lock));
2062 +}
2063 +
2064 +/*
2065 + * Dump the allocation table from host side point of view. This only dumps the
2066 + * data allocated for this process/device referenced by the file_data.
2067 + */
2068 +static void vmcs_sm_host_walk_alloc(struct sm_priv_data_t *file_data)
2069 +{
2070 + struct sm_resource_t *resource = NULL;
2071 +
2072 + /* Make sure the device was started properly. */
2073 + if ((sm_state == NULL) || (file_data == NULL)) {
2074 + pr_err("[%s]: invalid device\n", __func__);
2075 + return;
2076 + }
2077 +
2078 + mutex_lock(&(sm_state->map_lock));
2079 +
2080 + if (!list_empty(&file_data->resource_list)) {
2081 + list_for_each_entry(resource, &file_data->resource_list,
2082 + resource_list) {
2083 + pr_info("[%s]: guid: %x - hdl: %x, vc-mem: %p, size: %u, cache: %u\n",
2084 + __func__, resource->res_guid, resource->res_handle,
2085 + resource->res_base_mem, resource->res_size,
2086 + resource->res_cached);
2087 + }
2088 + }
2089 +
2090 + mutex_unlock(&(sm_state->map_lock));
2091 +}
2092 +
2093 +/* Create support for private data tracking. */
2094 +static struct sm_priv_data_t *vc_sm_create_priv_data(pid_t id)
2095 +{
2096 + char alloc_name[32];
2097 + struct sm_priv_data_t *file_data = NULL;
2098 +
2099 + /* Allocate private structure. */
2100 + file_data = kzalloc(sizeof(*file_data), GFP_KERNEL);
2101 +
2102 + if (!file_data) {
2103 + pr_err("[%s]: cannot allocate file data\n", __func__);
2104 + goto out;
2105 + }
2106 +
2107 + snprintf(alloc_name, sizeof(alloc_name), "%d", id);
2108 +
2109 + INIT_LIST_HEAD(&file_data->resource_list);
2110 + file_data->pid = id;
2111 + file_data->dir_pid = debugfs_create_dir(alloc_name,
2112 + sm_state->dir_alloc);
2113 +#if 0
2114 + /* TODO: fix this to support querying statistics per pid */
2115 +
2116 + if (IS_ERR_OR_NULL(file_data->dir_pid)) {
2117 + file_data->dir_pid = NULL;
2118 + } else {
2119 + struct dentry *dir_entry;
2120 +
2121 + dir_entry = debugfs_create_file(VC_SM_RESOURCES, 0444,
2122 + file_data->dir_pid, file_data,
2123 +						&vc_sm_debug_fs_fops);
2124 +
2125 + file_data->dir_res.dir_entry = dir_entry;
2126 + file_data->dir_res.priv_data = file_data;
2127 + file_data->dir_res.show = &vc_sm_alloc_show;
2128 +
2129 + dir_entry = debugfs_create_file(VC_SM_STATS, 0444,
2130 + file_data->dir_pid, file_data,
2131 +						&vc_sm_debug_fs_fops);
2132 +
2133 + file_data->dir_res.dir_entry = dir_entry;
2134 + file_data->dir_res.priv_data = file_data;
2135 + file_data->dir_res.show = &vc_sm_statistics_show;
2136 + }
2137 + pr_debug("[%s]: private data allocated %p\n", __func__, file_data);
2138 +
2139 +#endif
2140 +out:
2141 + return file_data;
2142 +}
2143 +
2144 +/*
2145 + * Open the device. Creates private state to help track all allocations
2146 + * associated with this device.
2147 + */
2148 +static int vc_sm_open(struct inode *inode, struct file *file)
2149 +{
2150 + int ret = 0;
2151 +
2152 + /* Make sure the device was started properly. */
2153 + if (!sm_state) {
2154 + pr_err("[%s]: invalid device\n", __func__);
2155 + ret = -EPERM;
2156 + goto out;
2157 + }
2158 +
2159 + file->private_data = vc_sm_create_priv_data(current->tgid);
2160 + if (file->private_data == NULL) {
2161 + pr_err("[%s]: failed to create data tracker\n", __func__);
2162 +
2163 + ret = -ENOMEM;
2164 + goto out;
2165 + }
2166 +
2167 +out:
2168 + return ret;
2169 +}
2170 +
2171 +/*
2172 + * Close the device. Free up any resources still associated with this
2173 + * device at that time.
2174 + */
2175 +static int vc_sm_release(struct inode *inode, struct file *file)
2176 +{
2177 + struct sm_priv_data_t *file_data =
2178 + (struct sm_priv_data_t *)file->private_data;
2179 + struct sm_resource_t *resource;
2180 + int ret = 0;
2181 +
2182 + /* Make sure the device was started properly. */
2183 + if (sm_state == NULL || file_data == NULL) {
2184 + pr_err("[%s]: invalid device\n", __func__);
2185 + ret = -EPERM;
2186 + goto out;
2187 + }
2188 +
2189 + pr_debug("[%s]: using private data %p\n", __func__, file_data);
2190 +
2191 + if (file_data->restart_sys == -EINTR) {
2192 + struct vc_sm_action_clean_t action_clean;
2193 +
2194 + pr_debug("[%s]: releasing following EINTR on %u (trans_id: %u) (likely due to signal)...\n",
2195 + __func__, file_data->int_action,
2196 + file_data->int_trans_id);
2197 +
2198 + action_clean.res_action = file_data->int_action;
2199 + action_clean.action_trans_id = file_data->int_trans_id;
2200 +
2201 + vc_vchi_sm_clean_up(sm_state->sm_handle, &action_clean);
2202 + }
2203 +
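+	/*
+	 * The acquire above takes a reference of its own, so release twice:
+	 * once for that reference and once (forced) for the allocation
+	 * itself.
+	 */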
2204 + while ((resource = vmcs_sm_acquire_first_resource(file_data)) != NULL) {
2205 + vmcs_sm_release_resource(resource, 0);
2206 + vmcs_sm_release_resource(resource, 1);
2207 + }
2208 +
2209 + /* Remove the corresponding proc entry. */
2210 + debugfs_remove_recursive(file_data->dir_pid);
2211 +
2212 + /* Terminate the private data. */
2213 + kfree(file_data);
2214 +
2215 +out:
2216 + return ret;
2217 +}
2218 +
2219 +static void vcsm_vma_open(struct vm_area_struct *vma)
2220 +{
2221 + struct sm_mmap *map = (struct sm_mmap *)vma->vm_private_data;
2222 +
2223 + pr_debug("[%s]: virt %lx-%lx, pid %i, pfn %i\n",
2224 + __func__, vma->vm_start, vma->vm_end, (int)current->tgid,
2225 + (int)vma->vm_pgoff);
2226 +
2227 + map->ref_count++;
2228 +}
2229 +
2230 +static void vcsm_vma_close(struct vm_area_struct *vma)
2231 +{
2232 + struct sm_mmap *map = (struct sm_mmap *)vma->vm_private_data;
2233 +
2234 + pr_debug("[%s]: virt %lx-%lx, pid %i, pfn %i\n",
2235 + __func__, vma->vm_start, vma->vm_end, (int)current->tgid,
2236 + (int)vma->vm_pgoff);
2237 +
2238 + map->ref_count--;
2239 +
2240 + /* Remove from the map table. */
2241 + if (map->ref_count == 0)
2242 + vmcs_sm_remove_map(sm_state, map->resource, map);
2243 +}
2244 +
2245 +static int vcsm_vma_fault(struct vm_fault *vmf)
2246 +{
2247 + struct sm_mmap *map = (struct sm_mmap *)vmf->vma->vm_private_data;
2248 + struct sm_resource_t *resource = map->resource;
2249 + pgoff_t page_offset;
2250 + unsigned long pfn;
2251 + int ret = 0;
2252 +
2253 + /* Lock the resource if necessary. */
2254 + if (!resource->lock_count) {
2255 + struct vc_sm_lock_unlock_t lock_unlock;
2256 + struct vc_sm_lock_result_t lock_result;
2257 + int status;
2258 +
2259 + lock_unlock.res_handle = resource->res_handle;
2260 + lock_unlock.res_mem = (uint32_t)resource->res_base_mem;
2261 +
2262 + pr_debug("[%s]: attempt to lock data - hdl %x, base address %p\n",
2263 + __func__, lock_unlock.res_handle,
2264 + (void *)lock_unlock.res_mem);
2265 +
2266 + /* Lock the videocore allocated resource. */
2267 + status = vc_vchi_sm_lock(sm_state->sm_handle,
2268 + &lock_unlock, &lock_result, 0);
2269 + if (status || !lock_result.res_mem) {
2270 + pr_err("[%s]: failed to lock memory on videocore (status: %u)\n",
2271 + __func__, status);
2272 + resource->res_stats[LOCK_FAIL]++;
2273 + return VM_FAULT_SIGBUS;
2274 + }
2275 +
2276 + pfn = vcaddr_to_pfn((unsigned long)resource->res_base_mem);
2277 + outer_inv_range(__pfn_to_phys(pfn),
2278 + __pfn_to_phys(pfn) + resource->res_size);
2279 +
2280 + resource->res_stats[LOCK]++;
2281 + resource->lock_count++;
2282 +
2283 + /* Keep track of the new base memory. */
2284 + if (lock_result.res_mem &&
2285 + lock_result.res_old_mem &&
2286 + (lock_result.res_mem != lock_result.res_old_mem)) {
2287 + resource->res_base_mem = (void *)lock_result.res_mem;
2288 + }
2289 + }
2290 +
2291 + /* We don't use vmf->pgoff since that has the fake offset */
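+	/*
+	 * res_base_mem is a VideoCore bus address: masking with 0x3FFFFFFF
+	 * strips the bus-alias bits (which only select VPU-side caching)
+	 * and adding mm_vc_mem_phys_addr rebases the result into the ARM
+	 * physical address map before it is converted to a page frame
+	 * number below.
+	 */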
2292 + page_offset = ((unsigned long)vmf->address - vmf->vma->vm_start);
2293 + pfn = (uint32_t)resource->res_base_mem & 0x3FFFFFFF;
2294 + pfn += mm_vc_mem_phys_addr;
2295 + pfn += page_offset;
2296 + pfn >>= PAGE_SHIFT;
2297 +
2298 + /* Finally, remap it */
2299 + ret = vm_insert_pfn(vmf->vma, (unsigned long)vmf->address, pfn);
2300 +
2301 + switch (ret) {
2302 + case 0:
2303 + case -ERESTARTSYS:
2304 + /*
2305 + * EBUSY is ok: this just means that another thread
2306 + * already did the job.
2307 + */
2308 + case -EBUSY:
2309 + return VM_FAULT_NOPAGE;
2310 + case -ENOMEM:
2311 + case -EAGAIN:
2312 + pr_err("[%s]: failed to map page pfn:%lx virt:%lx ret:%d\n", __func__,
2313 + pfn, (unsigned long)vmf->address, ret);
2314 + return VM_FAULT_OOM;
2315 + default:
2316 + pr_err("[%s]: failed to map page pfn:%lx virt:%lx ret:%d\n", __func__,
2317 + pfn, (unsigned long)vmf->address, ret);
2318 + return VM_FAULT_SIGBUS;
2319 + }
2320 +}
2321 +
2322 +static const struct vm_operations_struct vcsm_vm_ops = {
2323 + .open = vcsm_vma_open,
2324 + .close = vcsm_vma_close,
2325 + .fault = vcsm_vma_fault,
2326 +};
2327 +
2328 +/* Walk a VMA and clean+invalidate each valid page from the cache. */
2329 +static void vcsm_vma_cache_clean_page_range(unsigned long addr,
2330 + unsigned long end)
2331 +{
2332 + pgd_t *pgd;
2333 + pud_t *pud;
2334 + pmd_t *pmd;
2335 + pte_t *pte;
2336 + unsigned long pgd_next, pud_next, pmd_next;
2337 +
2338 + if (addr >= end)
2339 + return;
2340 +
2341 + /* Walk PGD */
2342 + pgd = pgd_offset(current->mm, addr);
2343 + do {
2344 + pgd_next = pgd_addr_end(addr, end);
2345 +
2346 + if (pgd_none(*pgd) || pgd_bad(*pgd))
2347 + continue;
2348 +
2349 + /* Walk PUD */
2350 + pud = pud_offset(pgd, addr);
2351 + do {
2352 + pud_next = pud_addr_end(addr, pgd_next);
2353 + if (pud_none(*pud) || pud_bad(*pud))
2354 + continue;
2355 +
2356 + /* Walk PMD */
2357 + pmd = pmd_offset(pud, addr);
2358 + do {
2359 + pmd_next = pmd_addr_end(addr, pud_next);
2360 + if (pmd_none(*pmd) || pmd_bad(*pmd))
2361 + continue;
2362 +
2363 + /* Walk PTE */
2364 + pte = pte_offset_map(pmd, addr);
2365 + do {
2366 + if (pte_none(*pte)
2367 + || !pte_present(*pte))
2368 + continue;
2369 +
2370 + /* Clean + invalidate */
2371 + dmac_flush_range((const void *) addr,
2372 + (const void *)
2373 + (addr + PAGE_SIZE));
2374 +
2375 + } while (pte++, addr +=
2376 + PAGE_SIZE, addr != pmd_next);
2377 + pte_unmap(pte);
2378 +
2379 + } while (pmd++, addr = pmd_next, addr != pud_next);
2380 +
2381 + } while (pud++, addr = pud_next, addr != pgd_next);
2382 + } while (pgd++, addr = pgd_next, addr != end);
2383 +}
2384 +
2385 +/* Map allocated data into user space. */
2386 +static int vc_sm_mmap(struct file *file, struct vm_area_struct *vma)
2387 +{
2388 + int ret = 0;
2389 + struct sm_priv_data_t *file_data =
2390 + (struct sm_priv_data_t *)file->private_data;
2391 + struct sm_resource_t *resource = NULL;
2392 + struct sm_mmap *map = NULL;
2393 +
2394 + /* Make sure the device was started properly. */
2395 + if ((sm_state == NULL) || (file_data == NULL)) {
2396 + pr_err("[%s]: invalid device\n", __func__);
2397 + return -EPERM;
2398 + }
2399 +
2400 + pr_debug("[%s]: private data %p, guid %x\n", __func__, file_data,
2401 + ((unsigned int)vma->vm_pgoff << PAGE_SHIFT));
2402 +
2403 + /*
2404 +	 * Look up the resource to make sure that the data we are being asked
2405 +	 * to mmap is something that we allocated.
2406 + *
2407 + * We use the offset information as the key to tell us which resource
2408 + * we are mapping.
2409 + */
2410 + resource = vmcs_sm_acquire_resource(file_data,
2411 + ((unsigned int)vma->vm_pgoff <<
2412 + PAGE_SHIFT));
2413 + if (resource == NULL) {
2414 + pr_err("[%s]: failed to locate resource for guid %x\n", __func__,
2415 + ((unsigned int)vma->vm_pgoff << PAGE_SHIFT));
2416 + return -ENOMEM;
2417 + }
2418 +
2419 + pr_debug("[%s]: guid %x, tgid %u, %u, %u\n",
2420 + __func__, resource->res_guid, current->tgid, resource->pid,
2421 + file_data->pid);
2422 +
2423 + /* Check permissions. */
2424 + if (resource->pid && (resource->pid != current->tgid)) {
2425 + pr_err("[%s]: current tgid %u != %u owner\n",
2426 + __func__, current->tgid, resource->pid);
2427 + ret = -EPERM;
2428 + goto error;
2429 + }
2430 +
2431 + /* Verify that what we are asked to mmap is proper. */
2432 + if (resource->res_size != (unsigned int)(vma->vm_end - vma->vm_start)) {
2433 + pr_err("[%s]: size inconsistency (resource: %u - mmap: %u)\n",
2434 + __func__,
2435 + resource->res_size,
2436 + (unsigned int)(vma->vm_end - vma->vm_start));
2437 +
2438 + ret = -EINVAL;
2439 + goto error;
2440 + }
2441 +
2442 + /*
2443 +	 * Keep track of the mapping in the global map list so that one can
2444 +	 * look up a mapping by user address or memory handle.
2445 + */
2446 + map = kzalloc(sizeof(*map), GFP_KERNEL);
2447 + if (map == NULL) {
2448 + pr_err("[%s]: failed to allocate global tracking resource\n",
2449 + __func__);
2450 + ret = -ENOMEM;
2451 + goto error;
2452 + }
2453 +
2454 + map->res_pid = current->tgid;
2455 + map->res_vc_hdl = resource->res_handle;
2456 + map->res_usr_hdl = resource->res_guid;
2457 + map->res_addr = (unsigned long)vma->vm_start;
2458 + map->resource = resource;
2459 + map->vma = vma;
2460 + vmcs_sm_add_map(sm_state, resource, map);
2461 +
2462 + /*
2463 + * We are not actually mapping the pages, we just provide a fault
2464 + * handler to allow pages to be mapped when accessed
2465 + */
2466 + vma->vm_flags |=
2467 + VM_IO | VM_PFNMAP | VM_DONTCOPY | VM_DONTEXPAND;
2468 + vma->vm_ops = &vcsm_vm_ops;
2469 + vma->vm_private_data = map;
2470 +
2471 + /* vm_pgoff is the first PFN of the mapped memory */
2472 + vma->vm_pgoff = (unsigned long)resource->res_base_mem & 0x3FFFFFFF;
2473 + vma->vm_pgoff += mm_vc_mem_phys_addr;
2474 + vma->vm_pgoff >>= PAGE_SHIFT;
2475 +
2476 + if ((resource->res_cached == VMCS_SM_CACHE_NONE) ||
2477 + (resource->res_cached == VMCS_SM_CACHE_VC)) {
2478 +		/* Allocated as non-host-cached memory; honour that here. */
2479 + vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
2480 + }
2481 +
2482 + pr_debug("[%s]: resource %p (guid %x) - cnt %u, base address %p, handle %x, size %u (%u), cache %u\n",
2483 + __func__,
2484 + resource, resource->res_guid, resource->lock_count,
2485 + resource->res_base_mem, resource->res_handle,
2486 + resource->res_size, (unsigned int)(vma->vm_end - vma->vm_start),
2487 + resource->res_cached);
2488 +
2489 + pr_debug("[%s]: resource %p (base address %p, handle %x) - map-count %d, usr-addr %x\n",
2490 + __func__, resource, resource->res_base_mem,
2491 + resource->res_handle, resource->map_count,
2492 + (unsigned int)vma->vm_start);
2493 +
2494 + vcsm_vma_open(vma);
2495 + resource->res_stats[MAP]++;
2496 + vmcs_sm_release_resource(resource, 0);
2497 +
2498 + if (resource->map) {
2499 +		/* Eager mapping was requested, so insert all PFNs up front. */
2500 + unsigned long addr;
2501 +
2502 + for (addr = vma->vm_start; addr < vma->vm_end; addr += PAGE_SIZE) {
2503 + /* Finally, remap it */
2504 + unsigned long pfn = (unsigned long)resource->res_base_mem & 0x3FFFFFFF;
2505 +
2506 + pfn += mm_vc_mem_phys_addr;
2507 + pfn += addr - vma->vm_start;
2508 + pfn >>= PAGE_SHIFT;
2509 + ret = vm_insert_pfn(vma, addr, pfn);
2510 + }
2511 + }
2512 +
2513 + return 0;
2514 +
2515 +error:
2516 + resource->res_stats[MAP_FAIL]++;
2517 + vmcs_sm_release_resource(resource, 0);
2518 + return ret;
2519 +}
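+
+/*
+ * Note on the mapping model: the VMA is created with VM_PFNMAP and, unless
+ * the eager-map flag (0x80 or'ed into the cache argument at allocation
+ * time) was set, no PFNs are inserted here; the first access faults into
+ * vcsm_vma_fault(), which also lazily takes the videocore lock.
+ */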
2520 +
2521 +/* Allocate a shared memory handle and block. */
2522 +int vc_sm_ioctl_alloc(struct sm_priv_data_t *private,
2523 + struct vmcs_sm_ioctl_alloc *ioparam)
2524 +{
2525 + int ret = 0;
2526 + int status;
2527 + struct sm_resource_t *resource;
2528 + struct vc_sm_alloc_t alloc = { 0 };
2529 + struct vc_sm_alloc_result_t result = { 0 };
2530 + enum vmcs_sm_cache_e cached = ioparam->cached;
2531 + bool map = false;
2532 +
2533 +	/* Flag requesting that the buffer be mapped up front rather than lazily. */
2534 + if (cached & 0x80) {
2535 + map = true;
2536 + cached &= ~0x80;
2537 + }
2538 +
2539 + /* Setup our allocation parameters */
2540 +	alloc.type = ((cached == VMCS_SM_CACHE_VC) ||
2541 +		      (cached == VMCS_SM_CACHE_BOTH)) ?
2542 +			VC_SM_ALLOC_CACHED : VC_SM_ALLOC_NON_CACHED;
2544 + alloc.base_unit = ioparam->size;
2545 + alloc.num_unit = ioparam->num;
2546 + alloc.allocator = current->tgid;
2547 + /* Align to kernel page size */
2548 + alloc.alignement = 4096;
2549 + /* Align the size to the kernel page size */
2550 + alloc.base_unit =
2551 + (alloc.base_unit + alloc.alignement - 1) & ~(alloc.alignement - 1);
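+	/* e.g. a 6000 byte request rounds up to 8192 = 2 * 4096 bytes */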
2552 + if (*ioparam->name) {
2553 + memcpy(alloc.name, ioparam->name, sizeof(alloc.name) - 1);
2554 + } else {
2555 + memcpy(alloc.name, VMCS_SM_RESOURCE_NAME_DEFAULT,
2556 + sizeof(VMCS_SM_RESOURCE_NAME_DEFAULT));
2557 + }
2558 +
2559 +	pr_debug("[%s]: attempt to allocate \"%s\" data - type %u, base %u (%u), num %u, alignment %u\n",
2560 + __func__, alloc.name, alloc.type, ioparam->size,
2561 + alloc.base_unit, alloc.num_unit, alloc.alignement);
2562 +
2563 + /* Allocate local resource to track this allocation. */
2564 + resource = kzalloc(sizeof(*resource), GFP_KERNEL);
2565 + if (!resource) {
2566 + ret = -ENOMEM;
2567 + goto error;
2568 + }
2569 + INIT_LIST_HEAD(&resource->map_list);
2570 + resource->ref_count++;
2571 + resource->pid = current->tgid;
2572 +
2573 + /* Allocate the videocore resource. */
2574 + status = vc_vchi_sm_alloc(sm_state->sm_handle, &alloc, &result,
2575 + &private->int_trans_id);
2576 + if (status == -EINTR) {
2577 + pr_debug("[%s]: requesting allocate memory action restart (trans_id: %u)\n",
2578 + __func__, private->int_trans_id);
2579 + ret = -ERESTARTSYS;
2580 + private->restart_sys = -EINTR;
2581 + private->int_action = VC_SM_MSG_TYPE_ALLOC;
2582 + goto error;
2583 + } else if (status != 0 || !result.res_mem) {
2584 + pr_err("[%s]: failed to allocate memory on videocore (status: %u, trans_id: %u)\n",
2585 + __func__, status, private->int_trans_id);
2586 + ret = -ENOMEM;
2587 + resource->res_stats[ALLOC_FAIL]++;
2588 + goto error;
2589 + }
2590 +
2591 + /* Keep track of the resource we created. */
2592 + resource->private = private;
2593 + resource->res_handle = result.res_handle;
2594 + resource->res_base_mem = (void *)result.res_mem;
2595 + resource->res_size = alloc.base_unit * alloc.num_unit;
2596 + resource->res_cached = cached;
2597 + resource->map = map;
2598 +
2599 + /*
2600 + * Kernel/user GUID. This global identifier is used for mmap'ing the
2601 + * allocated region from user space, it is passed as the mmap'ing
2602 + * offset, we use it to 'hide' the videocore handle/address.
2603 + */
2604 + mutex_lock(&sm_state->lock);
2605 + resource->res_guid = ++sm_state->guid;
2606 + mutex_unlock(&sm_state->lock);
2607 + resource->res_guid <<= PAGE_SHIFT;
2608 +
2609 + vmcs_sm_add_resource(private, resource);
2610 +
2611 + pr_debug("[%s]: allocated data - guid %x, hdl %x, base address %p, size %d, cache %d\n",
2612 + __func__, resource->res_guid, resource->res_handle,
2613 + resource->res_base_mem, resource->res_size,
2614 + resource->res_cached);
2615 +
2616 + /* We're done */
2617 + resource->res_stats[ALLOC]++;
2618 + ioparam->handle = resource->res_guid;
2619 + return 0;
2620 +
2621 +error:
2622 + pr_err("[%s]: failed to allocate \"%s\" data (%i) - type %u, base %u (%u), num %u, alignment %u\n",
2623 + __func__, alloc.name, ret, alloc.type, ioparam->size,
2624 + alloc.base_unit, alloc.num_unit, alloc.alignement);
2625 + if (resource != NULL) {
2626 + vc_sm_resource_deceased(resource, 1);
2627 + kfree(resource);
2628 + }
2629 + return ret;
2630 +}
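+
+/*
+ * Userspace view of the allocation path (illustrative sketch; the exact
+ * ioctl wrapper macros live in vmcs_sm_ioctl.h and the device node name
+ * may differ):
+ *
+ *	struct vmcs_sm_ioctl_alloc a = { .size = 4096, .num = 1,
+ *					 .cached = VMCS_SM_CACHE_NONE };
+ *	int fd = open("/dev/vcsm", O_RDWR);
+ *	ioctl(fd, VMCS_SM_IOCTL_MEM_ALLOC, &a);
+ *	void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED,
+ *		       fd, a.handle);	-- the GUID doubles as the offset
+ */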
2631 +
2632 +/* Share an allocated memory handle and block. */
2633 +int vc_sm_ioctl_alloc_share(struct sm_priv_data_t *private,
2634 + struct vmcs_sm_ioctl_alloc_share *ioparam)
2635 +{
2636 + struct sm_resource_t *resource, *shared_resource;
2637 + int ret = 0;
2638 +
2639 + pr_debug("[%s]: attempt to share resource %u\n", __func__,
2640 + ioparam->handle);
2641 +
2642 + shared_resource = vmcs_sm_acquire_global_resource(ioparam->handle);
2643 + if (shared_resource == NULL) {
2644 + ret = -ENOMEM;
2645 + goto error;
2646 + }
2647 +
2648 + /* Allocate local resource to track this allocation. */
2649 + resource = kzalloc(sizeof(*resource), GFP_KERNEL);
2650 + if (resource == NULL) {
2651 + pr_err("[%s]: failed to allocate local tracking resource\n",
2652 + __func__);
2653 + ret = -ENOMEM;
2654 + goto error;
2655 + }
2656 + INIT_LIST_HEAD(&resource->map_list);
2657 + resource->ref_count++;
2658 + resource->pid = current->tgid;
2659 +
2660 + /* Keep track of the resource we created. */
2661 + resource->private = private;
2662 + resource->res_handle = shared_resource->res_handle;
2663 + resource->res_base_mem = shared_resource->res_base_mem;
2664 + resource->res_size = shared_resource->res_size;
2665 + resource->res_cached = shared_resource->res_cached;
2666 + resource->res_shared = shared_resource;
2667 +
2668 + mutex_lock(&sm_state->lock);
2669 + resource->res_guid = ++sm_state->guid;
2670 + mutex_unlock(&sm_state->lock);
2671 + resource->res_guid <<= PAGE_SHIFT;
2672 +
2673 + vmcs_sm_add_resource(private, resource);
2674 +
2675 + pr_debug("[%s]: allocated data - guid %x, hdl %x, base address %p, size %d, cache %d\n",
2676 + __func__, resource->res_guid, resource->res_handle,
2677 + resource->res_base_mem, resource->res_size,
2678 + resource->res_cached);
2679 +
2680 + /* We're done */
2681 + resource->res_stats[ALLOC]++;
2682 + ioparam->handle = resource->res_guid;
2683 + ioparam->size = resource->res_size;
2684 + return 0;
2685 +
2686 +error:
2687 + pr_err("[%s]: failed to share %u\n", __func__, ioparam->handle);
2688 + if (shared_resource != NULL)
2689 + vmcs_sm_release_resource(shared_resource, 0);
2690 +
2691 + return ret;
2692 +}
2693 +
2694 +/* Free a previously allocated shared memory handle and block. */
2695 +static int vc_sm_ioctl_free(struct sm_priv_data_t *private,
2696 + struct vmcs_sm_ioctl_free *ioparam)
2697 +{
2698 + struct sm_resource_t *resource =
2699 + vmcs_sm_acquire_resource(private, ioparam->handle);
2700 +
2701 + if (resource == NULL) {
2702 + pr_err("[%s]: resource for guid %u does not exist\n", __func__,
2703 + ioparam->handle);
2704 + return -EINVAL;
2705 + }
2706 +
2707 + /* Check permissions. */
2708 + if (resource->pid && (resource->pid != current->tgid)) {
2709 + pr_err("[%s]: current tgid %u != %u owner\n",
2710 + __func__, current->tgid, resource->pid);
2711 + vmcs_sm_release_resource(resource, 0);
2712 + return -EPERM;
2713 + }
2714 +
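+	/*
+	 * Two releases: one for the reference taken by the acquire above,
+	 * one to drop the allocation's own reference.
+	 */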
2715 + vmcs_sm_release_resource(resource, 0);
2716 + vmcs_sm_release_resource(resource, 0);
2717 + return 0;
2718 +}
2719 +
2720 +/* Resize a previously allocated shared memory handle and block. */
2721 +static int vc_sm_ioctl_resize(struct sm_priv_data_t *private,
2722 + struct vmcs_sm_ioctl_resize *ioparam)
2723 +{
2724 + int ret = 0;
2725 + int status;
2726 + struct vc_sm_resize_t resize;
2727 + struct sm_resource_t *resource;
2728 +
2729 + /* Locate resource from GUID. */
2730 + resource = vmcs_sm_acquire_resource(private, ioparam->handle);
2731 + if (!resource) {
2732 + pr_err("[%s]: failed resource - guid %x\n",
2733 + __func__, ioparam->handle);
2734 + ret = -EFAULT;
2735 + goto error;
2736 + }
2737 +
2738 + /*
2739 +	 * If the resource is locked, its lock count will be non-zero, in
2740 +	 * which case we will not be allowed to resize it anyway, so
2741 + * reject the attempt here.
2742 + */
2743 + if (resource->lock_count != 0) {
2744 +		pr_err("[%s]: cannot resize - guid %x, lock-cnt %d\n",
2745 + __func__, ioparam->handle, resource->lock_count);
2746 + ret = -EFAULT;
2747 + goto error;
2748 + }
2749 +
2750 + /* Check permissions. */
2751 + if (resource->pid && (resource->pid != current->tgid)) {
2752 + pr_err("[%s]: current tgid %u != %u owner\n", __func__,
2753 + current->tgid, resource->pid);
2754 + ret = -EPERM;
2755 + goto error;
2756 + }
2757 +
2758 + if (resource->map_count != 0) {
2759 +		pr_err("[%s]: cannot resize - guid %x, map-cnt %d\n",
2760 + __func__, ioparam->handle, resource->map_count);
2761 + ret = -EFAULT;
2762 + goto error;
2763 + }
2764 +
2765 + resize.res_handle = resource->res_handle;
2766 + resize.res_mem = (uint32_t)resource->res_base_mem;
2767 + resize.res_new_size = ioparam->new_size;
2768 +
2769 + pr_debug("[%s]: attempt to resize data - guid %x, hdl %x, base address %p\n",
2770 + __func__, ioparam->handle, resize.res_handle,
2771 + (void *)resize.res_mem);
2772 +
2773 + /* Resize the videocore allocated resource. */
2774 + status = vc_vchi_sm_resize(sm_state->sm_handle, &resize,
2775 + &private->int_trans_id);
2776 + if (status == -EINTR) {
2777 + pr_debug("[%s]: requesting resize memory action restart (trans_id: %u)\n",
2778 + __func__, private->int_trans_id);
2779 + ret = -ERESTARTSYS;
2780 + private->restart_sys = -EINTR;
2781 + private->int_action = VC_SM_MSG_TYPE_RESIZE;
2782 + goto error;
2783 + } else if (status) {
2784 + pr_err("[%s]: failed to resize memory on videocore (status: %u, trans_id: %u)\n",
2785 + __func__, status, private->int_trans_id);
2786 + ret = -EPERM;
2787 + goto error;
2788 + }
2789 +
2790 +	pr_debug("[%s]: successfully resized data - hdl %x, size %d -> %d\n",
2791 + __func__, resize.res_handle, resource->res_size,
2792 + resize.res_new_size);
2793 +
2794 + /* Successfully resized, save the information and inform the user. */
2795 + ioparam->old_size = resource->res_size;
2796 + resource->res_size = resize.res_new_size;
2797 +
2798 +error:
2799 + if (resource)
2800 + vmcs_sm_release_resource(resource, 0);
2801 +
2802 + return ret;
2803 +}
2804 +
2805 +/* Lock a previously allocated shared memory handle and block. */
2806 +static int vc_sm_ioctl_lock(struct sm_priv_data_t *private,
2807 + struct vmcs_sm_ioctl_lock_unlock *ioparam,
2808 + int change_cache, enum vmcs_sm_cache_e cache_type,
2809 + unsigned int vc_addr)
2810 +{
2811 + int status;
2812 + struct vc_sm_lock_unlock_t lock;
2813 + struct vc_sm_lock_result_t result;
2814 + struct sm_resource_t *resource;
2815 + int ret = 0;
2816 + struct sm_mmap *map, *map_tmp;
2817 + unsigned long phys_addr;
2818 +
2819 + map = NULL;
2820 +
2821 + /* Locate resource from GUID. */
2822 + resource = vmcs_sm_acquire_resource(private, ioparam->handle);
2823 + if (resource == NULL) {
2824 + ret = -EINVAL;
2825 + goto error;
2826 + }
2827 +
2828 + /* Check permissions. */
2829 + if (resource->pid && (resource->pid != current->tgid)) {
2830 + pr_err("[%s]: current tgid %u != %u owner\n", __func__,
2831 + current->tgid, resource->pid);
2832 + ret = -EPERM;
2833 + goto error;
2834 + }
2835 +
2836 + lock.res_handle = resource->res_handle;
2837 + lock.res_mem = (uint32_t)resource->res_base_mem;
2838 +
2839 + /* Take the lock and get the address to be mapped. */
2840 + if (vc_addr == 0) {
2841 + pr_debug("[%s]: attempt to lock data - guid %x, hdl %x, base address %p\n",
2842 + __func__, ioparam->handle, lock.res_handle,
2843 + (void *)lock.res_mem);
2844 +
2845 + /* Lock the videocore allocated resource. */
2846 + status = vc_vchi_sm_lock(sm_state->sm_handle, &lock, &result,
2847 + &private->int_trans_id);
2848 + if (status == -EINTR) {
2849 + pr_debug("[%s]: requesting lock memory action restart (trans_id: %u)\n",
2850 + __func__, private->int_trans_id);
2851 + ret = -ERESTARTSYS;
2852 + private->restart_sys = -EINTR;
2853 + private->int_action = VC_SM_MSG_TYPE_LOCK;
2854 + goto error;
2855 +		} else if (status || !result.res_mem) {
2857 + pr_err("[%s]: failed to lock memory on videocore (status: %u, trans_id: %u)\n",
2858 + __func__, status, private->int_trans_id);
2859 + ret = -EPERM;
2860 + resource->res_stats[LOCK_FAIL]++;
2861 + goto error;
2862 + }
2863 +
2864 +		pr_debug("[%s]: successfully locked data - hdl %x, base address %p (%p), lock-cnt %d\n",
2865 + __func__, lock.res_handle, (void *)result.res_mem,
2866 + (void *)lock.res_mem, resource->lock_count);
2867 +	} else {
2868 +		/* Lock assumed already taken; the address to map is known. */
2869 +		resource->res_base_mem = (void *)vc_addr;
2870 +	}
2871 +
2872 + resource->res_stats[LOCK]++;
2873 + resource->lock_count++;
2874 +
2875 + /* Keep track of the new base memory allocation if it has changed. */
2876 + if ((vc_addr == 0) &&
2877 + ((void *)result.res_mem) &&
2878 + ((void *)result.res_old_mem) &&
2879 + (result.res_mem != result.res_old_mem)) {
2880 + resource->res_base_mem = (void *)result.res_mem;
2881 +
2882 + /* Kernel allocated resources. */
2883 + if (resource->pid == 0) {
2884 + if (!list_empty(&resource->map_list)) {
2885 + list_for_each_entry_safe(map, map_tmp,
2886 + &resource->map_list,
2887 + resource_map_list) {
2888 + if (map->res_addr) {
2889 + iounmap((void *)map->res_addr);
2890 + map->res_addr = 0;
2891 +
2892 + vmcs_sm_remove_map(sm_state,
2893 + map->resource,
2894 + map);
2895 + break;
2896 + }
2897 + }
2898 + }
2899 + }
2900 + }
2901 +
2902 + if (change_cache)
2903 + resource->res_cached = cache_type;
2904 +
2905 + if (resource->map_count) {
2906 + ioparam->addr =
2907 + vmcs_sm_usr_address_from_pid_and_usr_handle(
2908 + current->tgid, ioparam->handle);
2909 +
2910 + pr_debug("[%s] map_count %d private->pid %d current->tgid %d hnd %x addr %u\n",
2911 + __func__, resource->map_count, private->pid,
2912 + current->tgid, ioparam->handle, ioparam->addr);
2913 + } else {
2914 + /* Kernel allocated resources. */
2915 + if (resource->pid == 0) {
2916 + pr_debug("[%s]: attempt mapping kernel resource - guid %x, hdl %x\n",
2917 + __func__, ioparam->handle, lock.res_handle);
2918 +
2919 + ioparam->addr = 0;
2920 +
2921 + map = kzalloc(sizeof(*map), GFP_KERNEL);
2922 + if (map == NULL) {
2923 + pr_err("[%s]: failed allocating tracker\n",
2924 + __func__);
2925 + ret = -ENOMEM;
2926 + goto error;
2927 + } else {
2928 + phys_addr = (uint32_t)resource->res_base_mem &
2929 + 0x3FFFFFFF;
2930 + phys_addr += mm_vc_mem_phys_addr;
2931 + if (resource->res_cached
2932 + == VMCS_SM_CACHE_HOST) {
2933 + ioparam->addr = (unsigned long)
2934 + /* TODO - make cached work */
2935 + ioremap_nocache(phys_addr,
2936 + resource->res_size);
2937 +
2938 + pr_debug("[%s]: mapping kernel - guid %x, hdl %x - cached mapping %u\n",
2939 + __func__, ioparam->handle,
2940 + lock.res_handle, ioparam->addr);
2941 + } else {
2942 + ioparam->addr = (unsigned long)
2943 + ioremap_nocache(phys_addr,
2944 + resource->res_size);
2945 +
2946 + pr_debug("[%s]: mapping kernel- guid %x, hdl %x - non cached mapping %u\n",
2947 + __func__, ioparam->handle,
2948 + lock.res_handle, ioparam->addr);
2949 + }
2950 +
2951 + map->res_pid = 0;
2952 + map->res_vc_hdl = resource->res_handle;
2953 + map->res_usr_hdl = resource->res_guid;
2954 + map->res_addr = ioparam->addr;
2955 + map->resource = resource;
2956 + map->vma = NULL;
2957 +
2958 + vmcs_sm_add_map(sm_state, resource, map);
2959 + }
2960 + } else
2961 + ioparam->addr = 0;
2962 + }
2963 +
2964 +error:
2965 + if (resource)
2966 + vmcs_sm_release_resource(resource, 0);
2967 +
2968 + return ret;
2969 +}
2970 +
2971 +/* Unlock a previously allocated shared memory handle and block. */
2972 +static int vc_sm_ioctl_unlock(struct sm_priv_data_t *private,
2973 + struct vmcs_sm_ioctl_lock_unlock *ioparam,
2974 + int flush, int wait_reply, int no_vc_unlock)
2975 +{
2976 + int status;
2977 + struct vc_sm_lock_unlock_t unlock;
2978 + struct sm_mmap *map, *map_tmp;
2979 + struct sm_resource_t *resource;
2980 + int ret = 0;
2981 +
2982 + map = NULL;
2983 +
2984 + /* Locate resource from GUID. */
2985 + resource = vmcs_sm_acquire_resource(private, ioparam->handle);
2986 + if (resource == NULL) {
2987 + ret = -EINVAL;
2988 + goto error;
2989 + }
2990 +
2991 + /* Check permissions. */
2992 + if (resource->pid && (resource->pid != current->tgid)) {
2993 + pr_err("[%s]: current tgid %u != %u owner\n",
2994 + __func__, current->tgid, resource->pid);
2995 + ret = -EPERM;
2996 + goto error;
2997 + }
2998 +
2999 + unlock.res_handle = resource->res_handle;
3000 + unlock.res_mem = (uint32_t)resource->res_base_mem;
3001 +
3002 + pr_debug("[%s]: attempt to unlock data - guid %x, hdl %x, base address %p\n",
3003 + __func__, ioparam->handle, unlock.res_handle,
3004 + (void *)unlock.res_mem);
3005 +
3006 + /* User space allocated resources. */
3007 + if (resource->pid) {
3008 + /* Flush if requested */
3009 + if (resource->res_cached && flush) {
3010 + dma_addr_t phys_addr = 0;
3011 +
3012 + resource->res_stats[FLUSH]++;
3013 +
3014 + phys_addr =
3015 + (dma_addr_t)((uint32_t)resource->res_base_mem &
3016 + 0x3FFFFFFF);
3017 + phys_addr += (dma_addr_t)mm_vc_mem_phys_addr;
3018 +
3019 + /* L1 cache flush */
3020 + down_read(&current->mm->mmap_sem);
3021 + list_for_each_entry(map, &resource->map_list,
3022 + resource_map_list) {
3023 + if (map->vma) {
3024 + unsigned long start;
3025 + unsigned long end;
3026 +
3027 + start = map->vma->vm_start;
3028 + end = map->vma->vm_end;
3029 +
3030 + vcsm_vma_cache_clean_page_range(
3031 + start, end);
3032 + }
3033 + }
3034 + up_read(&current->mm->mmap_sem);
3035 +
3036 + /* L2 cache flush */
3037 + outer_clean_range(phys_addr,
3038 + phys_addr +
3039 + (size_t) resource->res_size);
3040 + }
3041 +
3042 + /* We need to zap all the vmas associated with this resource */
3043 + if (resource->lock_count == 1) {
3044 + down_read(&current->mm->mmap_sem);
3045 + list_for_each_entry(map, &resource->map_list,
3046 + resource_map_list) {
3047 + if (map->vma) {
3048 + zap_vma_ptes(map->vma,
3049 + map->vma->vm_start,
3050 + map->vma->vm_end -
3051 + map->vma->vm_start);
3052 + }
3053 + }
3054 + up_read(&current->mm->mmap_sem);
3055 + }
3056 +	} else {
3057 +		/* Kernel allocated resources. */
3059 +		/* Only the allocation's own ref plus the one taken above. */
3060 + if (resource->ref_count == 2) {
3061 + if (!list_empty(&resource->map_list)) {
3062 + list_for_each_entry_safe(map, map_tmp,
3063 + &resource->map_list,
3064 + resource_map_list) {
3065 + if (map->res_addr) {
3066 + if (flush &&
3067 + (resource->res_cached ==
3068 + VMCS_SM_CACHE_HOST)) {
3069 +						unsigned long phys_addr;
3070 +
3071 +						phys_addr = (uint32_t)
3072 +							resource->res_base_mem & 0x3FFFFFFF;
3073 +						phys_addr += mm_vc_mem_phys_addr;
3074 +
3075 +						/* L1 cache flush */
3076 +						dmac_flush_range(
3077 +						    (const void *)map->res_addr,
3078 +						    (const void *)(map->res_addr +
3079 +							resource->res_size));
3080 +
3081 +						/* L2 cache flush */
3082 +						outer_clean_range(phys_addr,
3083 +						    phys_addr +
3084 +						    (size_t)resource->res_size);
3085 +					}
3090 +
3091 + iounmap((void *)map->res_addr);
3092 + map->res_addr = 0;
3093 +
3094 + vmcs_sm_remove_map(sm_state,
3095 + map->resource,
3096 + map);
3097 + break;
3098 + }
3099 + }
3100 + }
3101 + }
3102 + }
3103 +
3104 + if (resource->lock_count) {
3105 + /* Bypass the videocore unlock. */
3106 + if (no_vc_unlock)
3107 + status = 0;
3108 + /* Unlock the videocore allocated resource. */
3109 + else {
3110 + status =
3111 + vc_vchi_sm_unlock(sm_state->sm_handle, &unlock,
3112 + &private->int_trans_id,
3113 + wait_reply);
3114 + if (status == -EINTR) {
3115 + pr_debug("[%s]: requesting unlock memory action restart (trans_id: %u)\n",
3116 + __func__, private->int_trans_id);
3117 +
3118 + ret = -ERESTARTSYS;
3119 + resource->res_stats[UNLOCK]--;
3120 + private->restart_sys = -EINTR;
3121 + private->int_action = VC_SM_MSG_TYPE_UNLOCK;
3122 + goto error;
3123 + } else if (status != 0) {
3124 + pr_err("[%s]: failed to unlock vc mem (status: %u, trans_id: %u)\n",
3125 + __func__, status, private->int_trans_id);
3126 +
3127 + ret = -EPERM;
3128 + resource->res_stats[UNLOCK_FAIL]++;
3129 + goto error;
3130 + }
3131 + }
3132 +
3133 + resource->res_stats[UNLOCK]++;
3134 + resource->lock_count--;
3135 + }
3136 +
3137 +	pr_debug("[%s]: successfully unlocked data - hdl %x, base address %p, lock-cnt %d\n",
3138 + __func__, unlock.res_handle, (void *)unlock.res_mem,
3139 + resource->lock_count);
3140 +
3141 +error:
3142 + if (resource)
3143 + vmcs_sm_release_resource(resource, 0);
3144 +
3145 + return ret;
3146 +}
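+
+/*
+ * Typical lock/use/unlock sequence from userspace (illustrative sketch;
+ * wrapper macro names are assumed to match vmcs_sm_ioctl.h):
+ *
+ *	struct vmcs_sm_ioctl_lock_unlock lu = { .handle = handle };
+ *	ioctl(fd, VMCS_SM_IOCTL_MEM_LOCK, &lu);		-- pins the buffer
+ *	... CPU accesses the mapping at lu.addr ...
+ *	ioctl(fd, VMCS_SM_IOCTL_MEM_UNLOCK, &lu);	-- drops the VPU lock
+ */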
3147 +
3148 +/* Import a contiguous block of memory to be shared with VC. */
3149 +int vc_sm_ioctl_import_dmabuf(struct sm_priv_data_t *private,
3150 + struct vmcs_sm_ioctl_import_dmabuf *ioparam,
3151 + struct dma_buf *src_dma_buf)
3152 +{
3153 + int ret = 0;
3154 + int status;
3155 + struct sm_resource_t *resource = NULL;
3156 + struct vc_sm_import import = { 0 };
3157 + struct vc_sm_import_result result = { 0 };
3158 + struct dma_buf *dma_buf;
3159 + struct dma_buf_attachment *attach = NULL;
3160 + struct sg_table *sgt = NULL;
3161 +
3162 + /* Setup our allocation parameters */
3163 + if (src_dma_buf) {
3164 + get_dma_buf(src_dma_buf);
3165 + dma_buf = src_dma_buf;
3166 + } else {
3167 + dma_buf = dma_buf_get(ioparam->dmabuf_fd);
3168 + }
3169 + if (IS_ERR(dma_buf))
3170 + return PTR_ERR(dma_buf);
3171 +
3172 + attach = dma_buf_attach(dma_buf, &sm_state->pdev->dev);
3173 + if (IS_ERR(attach)) {
3174 + ret = PTR_ERR(attach);
3175 + goto error;
3176 + }
3177 +
3178 + sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
3179 + if (IS_ERR(sgt)) {
3180 + ret = PTR_ERR(sgt);
3181 + goto error;
3182 + }
3183 +
3184 + /* Verify that the address block is contiguous */
3185 + if (sgt->nents != 1) {
3186 + ret = -ENOMEM;
3187 + goto error;
3188 + }
3189 +
3190 + import.type = ((ioparam->cached == VMCS_SM_CACHE_VC) ||
3191 + (ioparam->cached == VMCS_SM_CACHE_BOTH)) ?
3192 + VC_SM_ALLOC_CACHED : VC_SM_ALLOC_NON_CACHED;
3193 + import.addr = (uint32_t)sg_dma_address(sgt->sgl);
3194 + import.size = sg_dma_len(sgt->sgl);
3195 + import.allocator = current->tgid;
3196 +
3197 + if (*ioparam->name)
3198 + memcpy(import.name, ioparam->name, sizeof(import.name) - 1);
3199 + else
3200 + memcpy(import.name, VMCS_SM_RESOURCE_NAME_DEFAULT,
3201 + sizeof(VMCS_SM_RESOURCE_NAME_DEFAULT));
3202 +
3203 + pr_debug("[%s]: attempt to import \"%s\" data - type %u, addr %p, size %u\n",
3204 + __func__, import.name, import.type,
3205 + (void *)import.addr, import.size);
3206 +
3207 + /* Allocate local resource to track this allocation. */
3208 + resource = kzalloc(sizeof(*resource), GFP_KERNEL);
3209 + if (!resource) {
3210 + ret = -ENOMEM;
3211 + goto error;
3212 + }
3213 + INIT_LIST_HEAD(&resource->map_list);
3214 + resource->ref_count++;
3215 + resource->pid = current->tgid;
3216 +
3217 + /* Allocate the videocore resource. */
3218 + status = vc_vchi_sm_import(sm_state->sm_handle, &import, &result,
3219 + &private->int_trans_id);
3220 + if (status == -EINTR) {
3221 + pr_debug("[%s]: requesting import memory action restart (trans_id: %u)\n",
3222 + __func__, private->int_trans_id);
3223 + ret = -ERESTARTSYS;
3224 + private->restart_sys = -EINTR;
3225 + private->int_action = VC_SM_MSG_TYPE_IMPORT;
3226 + goto error;
3227 + } else if (status || !result.res_handle) {
3228 +		pr_err("[%s]: failed to import memory on videocore (status: %u, trans_id: %u)\n",
3229 + __func__, status, private->int_trans_id);
3230 + ret = -ENOMEM;
3231 + resource->res_stats[ALLOC_FAIL]++;
3232 + goto error;
3233 + }
3234 +
3235 + /* Keep track of the resource we created. */
3236 + resource->private = private;
3237 + resource->res_handle = result.res_handle;
3238 + resource->res_size = import.size;
3239 + resource->res_cached = ioparam->cached;
3240 +
3241 + resource->dma_buf = dma_buf;
3242 + resource->attach = attach;
3243 + resource->sgt = sgt;
3244 + resource->dma_addr = sg_dma_address(sgt->sgl);
3245 +
3246 + /*
3247 + * Kernel/user GUID. This global identifier is used for mmap'ing the
3248 + * allocated region from user space, it is passed as the mmap'ing
3249 + * offset, we use it to 'hide' the videocore handle/address.
3250 + */
3251 + mutex_lock(&sm_state->lock);
3252 + resource->res_guid = ++sm_state->guid;
3253 + mutex_unlock(&sm_state->lock);
3254 + resource->res_guid <<= PAGE_SHIFT;
3255 +
3256 + vmcs_sm_add_resource(private, resource);
3257 +
3258 + /* We're done */
3259 + resource->res_stats[IMPORT]++;
3260 + ioparam->handle = resource->res_guid;
3261 + return 0;
3262 +
3263 +error:
3264 +	if (resource) {
3265 +		resource->res_stats[IMPORT_FAIL]++;
3266 +		vc_sm_resource_deceased(resource, 1);
3267 +		kfree(resource);
3268 +	}
3269 + if (sgt)
3270 + dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
3271 + if (attach)
3272 + dma_buf_detach(dma_buf, attach);
3273 + dma_buf_put(dma_buf);
3274 + return ret;
3275 +}
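+
+/*
+ * Userspace sketch of importing an existing dma-buf (illustrative; the
+ * dmabuf fd would come from another exporter, e.g. a DRM or V4L2 driver,
+ * and the wrapper macro name is assumed from vmcs_sm_ioctl.h):
+ *
+ *	struct vmcs_sm_ioctl_import_dmabuf imp = {
+ *		.dmabuf_fd = buf_fd,
+ *		.cached = VMCS_SM_CACHE_NONE,
+ *	};
+ *	ioctl(fd, VMCS_SM_IOCTL_MEM_IMPORT_DMABUF, &imp);
+ *	-- imp.handle can now be locked and mmap'd like a native allocation
+ */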
3276 +
3277 +/* Handle control from host. */
3278 +static long vc_sm_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
3279 +{
3280 + int ret = 0;
3281 + unsigned int cmdnr = _IOC_NR(cmd);
3282 + struct sm_priv_data_t *file_data =
3283 + (struct sm_priv_data_t *)file->private_data;
3284 + struct sm_resource_t *resource = NULL;
3285 +
3286 + /* Validate we can work with this device. */
3287 + if ((sm_state == NULL) || (file_data == NULL)) {
3288 + pr_err("[%s]: invalid device\n", __func__);
3289 + ret = -EPERM;
3290 + goto out;
3291 + }
3292 +
3293 + pr_debug("[%s]: cmd %x tgid %u, owner %u\n", __func__, cmdnr,
3294 + current->tgid, file_data->pid);
3295 +
3296 +	/* Is this action a re-post of a previously interrupted action? */
3297 + if (file_data->restart_sys == -EINTR) {
3298 + struct vc_sm_action_clean_t action_clean;
3299 +
3300 + pr_debug("[%s]: clean up of action %u (trans_id: %u) following EINTR\n",
3301 + __func__, file_data->int_action,
3302 + file_data->int_trans_id);
3303 +
3304 + action_clean.res_action = file_data->int_action;
3305 + action_clean.action_trans_id = file_data->int_trans_id;
3306 +
3307 + vc_vchi_sm_clean_up(sm_state->sm_handle, &action_clean);
3308 +
3309 + file_data->restart_sys = 0;
3310 + }
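+
+	/*
+	 * The handlers below return -ERESTARTSYS when a VCHI transaction is
+	 * interrupted by a signal; int_action/int_trans_id record what was
+	 * in flight so the clean-up above can tell the VPU to discard the
+	 * stale transaction before the syscall is restarted.
+	 */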
3311 +
3312 + /* Now process the command. */
3313 + switch (cmdnr) {
3314 +	/* New memory allocation. */
3316 + case VMCS_SM_CMD_ALLOC:
3317 + {
3318 + struct vmcs_sm_ioctl_alloc ioparam;
3319 +
3320 + /* Get the parameter data. */
3321 + if (copy_from_user
3322 + (&ioparam, (void *)arg, sizeof(ioparam)) != 0) {
3323 + pr_err("[%s]: failed to copy-from-user for cmd %x\n",
3324 + __func__, cmdnr);
3325 + ret = -EFAULT;
3326 + goto out;
3327 + }
3328 +
3329 + ret = vc_sm_ioctl_alloc(file_data, &ioparam);
3330 + if (!ret &&
3331 + (copy_to_user((void *)arg,
3332 + &ioparam, sizeof(ioparam)) != 0)) {
3333 + struct vmcs_sm_ioctl_free freeparam = {
3334 + ioparam.handle
3335 + };
3336 + pr_err("[%s]: failed to copy-to-user for cmd %x\n",
3337 + __func__, cmdnr);
3338 + vc_sm_ioctl_free(file_data, &freeparam);
3339 + ret = -EFAULT;
3340 + }
3341 +
3342 + /* Done. */
3343 + goto out;
3344 + }
3345 + break;
3346 +
3347 + /* Share existing memory allocation. */
3348 + case VMCS_SM_CMD_ALLOC_SHARE:
3349 + {
3350 + struct vmcs_sm_ioctl_alloc_share ioparam;
3351 +
3352 + /* Get the parameter data. */
3353 + if (copy_from_user
3354 + (&ioparam, (void *)arg, sizeof(ioparam)) != 0) {
3355 + pr_err("[%s]: failed to copy-from-user for cmd %x\n",
3356 + __func__, cmdnr);
3357 + ret = -EFAULT;
3358 + goto out;
3359 + }
3360 +
3361 + ret = vc_sm_ioctl_alloc_share(file_data, &ioparam);
3362 +
3363 + /* Copy result back to user. */
3364 + if (!ret
3365 + && copy_to_user((void *)arg, &ioparam,
3366 + sizeof(ioparam)) != 0) {
3367 + struct vmcs_sm_ioctl_free freeparam = {
3368 + ioparam.handle
3369 + };
3370 + pr_err("[%s]: failed to copy-to-user for cmd %x\n",
3371 + __func__, cmdnr);
3372 + vc_sm_ioctl_free(file_data, &freeparam);
3373 + ret = -EFAULT;
3374 + }
3375 +
3376 + /* Done. */
3377 + goto out;
3378 + }
3379 + break;
3380 +
3381 + case VMCS_SM_CMD_IMPORT_DMABUF:
3382 + {
3383 + struct vmcs_sm_ioctl_import_dmabuf ioparam;
3384 +
3385 + /* Get the parameter data. */
3386 + if (copy_from_user
3387 + (&ioparam, (void *)arg, sizeof(ioparam)) != 0) {
3388 + pr_err("[%s]: failed to copy-from-user for cmd %x\n",
3389 + __func__, cmdnr);
3390 + ret = -EFAULT;
3391 + goto out;
3392 + }
3393 +
3394 + ret = vc_sm_ioctl_import_dmabuf(file_data, &ioparam,
3395 + NULL);
3396 + if (!ret &&
3397 + (copy_to_user((void *)arg,
3398 + &ioparam, sizeof(ioparam)) != 0)) {
3399 + struct vmcs_sm_ioctl_free freeparam = {
3400 + ioparam.handle
3401 + };
3402 + pr_err("[%s]: failed to copy-to-user for cmd %x\n",
3403 + __func__, cmdnr);
3404 + vc_sm_ioctl_free(file_data, &freeparam);
3405 + ret = -EFAULT;
3406 + }
3407 +
3408 + /* Done. */
3409 + goto out;
3410 + }
3411 + break;
3412 +
3413 + /* Lock (attempt to) *and* register a cache behavior change. */
3414 + case VMCS_SM_CMD_LOCK_CACHE:
3415 + {
3416 + struct vmcs_sm_ioctl_lock_cache ioparam;
3417 + struct vmcs_sm_ioctl_lock_unlock lock;
3418 +
3419 + /* Get parameter data. */
3420 + if (copy_from_user
3421 + (&ioparam, (void *)arg, sizeof(ioparam)) != 0) {
3422 + pr_err("[%s]: failed to copy-from-user for cmd %x\n",
3423 + __func__, cmdnr);
3424 + ret = -EFAULT;
3425 + goto out;
3426 + }
3427 +
3428 + lock.handle = ioparam.handle;
3429 + ret =
3430 + vc_sm_ioctl_lock(file_data, &lock, 1,
3431 + ioparam.cached, 0);
3432 +
3433 + /* Done. */
3434 + goto out;
3435 + }
3436 + break;
3437 +
3438 + /* Lock (attempt to) existing memory allocation. */
3439 + case VMCS_SM_CMD_LOCK:
3440 + {
3441 + struct vmcs_sm_ioctl_lock_unlock ioparam;
3442 +
3443 + /* Get parameter data. */
3444 + if (copy_from_user
3445 + (&ioparam, (void *)arg, sizeof(ioparam)) != 0) {
3446 + pr_err("[%s]: failed to copy-from-user for cmd %x\n",
3447 + __func__, cmdnr);
3448 + ret = -EFAULT;
3449 + goto out;
3450 + }
3451 +
3452 + ret = vc_sm_ioctl_lock(file_data, &ioparam, 0, 0, 0);
3453 +
3454 + /* Copy result back to user. */
3455 + if (copy_to_user((void *)arg, &ioparam, sizeof(ioparam))
3456 + != 0) {
3457 + pr_err("[%s]: failed to copy-to-user for cmd %x\n",
3458 + __func__, cmdnr);
3459 + ret = -EFAULT;
3460 + }
3461 +
3462 + /* Done. */
3463 + goto out;
3464 + }
3465 + break;
3466 +
3467 + /* Unlock (attempt to) existing memory allocation. */
3468 + case VMCS_SM_CMD_UNLOCK:
3469 + {
3470 + struct vmcs_sm_ioctl_lock_unlock ioparam;
3471 +
3472 + /* Get parameter data. */
3473 + if (copy_from_user
3474 + (&ioparam, (void *)arg, sizeof(ioparam)) != 0) {
3475 + pr_err("[%s]: failed to copy-from-user for cmd %x\n",
3476 + __func__, cmdnr);
3477 + ret = -EFAULT;
3478 + goto out;
3479 + }
3480 +
3481 + ret = vc_sm_ioctl_unlock(file_data, &ioparam, 0, 1, 0);
3482 +
3483 + /* Done. */
3484 + goto out;
3485 + }
3486 + break;
3487 +
3488 + /* Resize (attempt to) existing memory allocation. */
3489 + case VMCS_SM_CMD_RESIZE:
3490 + {
3491 + struct vmcs_sm_ioctl_resize ioparam;
3492 +
3493 + /* Get parameter data. */
3494 + if (copy_from_user
3495 + (&ioparam, (void *)arg, sizeof(ioparam)) != 0) {
3496 + pr_err("[%s]: failed to copy-from-user for cmd %x\n",
3497 + __func__, cmdnr);
3498 + ret = -EFAULT;
3499 + goto out;
3500 + }
3501 +
3502 + ret = vc_sm_ioctl_resize(file_data, &ioparam);
3503 +
3504 + /* Copy result back to user. */
3505 + if (copy_to_user((void *)arg, &ioparam, sizeof(ioparam))
3506 + != 0) {
3507 + pr_err("[%s]: failed to copy-to-user for cmd %x\n",
3508 + __func__, cmdnr);
3509 + ret = -EFAULT;
3510 + }
3511 + goto out;
3512 + }
3513 + break;
3514 +
3515 +	/* Terminate existing memory allocation. */
3517 + case VMCS_SM_CMD_FREE:
3518 + {
3519 + struct vmcs_sm_ioctl_free ioparam;
3520 +
3521 +		/* Get parameter data. */
3523 + if (copy_from_user
3524 + (&ioparam, (void *)arg, sizeof(ioparam)) != 0) {
3525 + pr_err("[%s]: failed to copy-from-user for cmd %x\n",
3526 + __func__, cmdnr);
3527 + ret = -EFAULT;
3528 + goto out;
3529 + }
3530 +
3531 + ret = vc_sm_ioctl_free(file_data, &ioparam);
3532 +
3533 +		/* Done. */
3535 + goto out;
3536 + }
3537 + break;
3538 +
3539 +	/* Walk allocations on videocore; the information shows up in the
3540 +	 * videocore log.
3541 +	 */
3542 + case VMCS_SM_CMD_VC_WALK_ALLOC:
3543 + {
3544 + pr_debug("[%s]: invoking walk alloc\n", __func__);
3545 +
3546 + if (vc_vchi_sm_walk_alloc(sm_state->sm_handle) != 0)
3547 + pr_err("[%s]: failed to walk-alloc on videocore\n",
3548 + __func__);
3549 +
3550 + /* Done.
3551 + */
3552 + goto out;
3553 + }
3554 + break;
3555 +	/* Walk mapping table on host; information shows up in the
3556 +	 * kernel log.
3557 +	 */
3558 + case VMCS_SM_CMD_HOST_WALK_MAP:
3559 + {
3560 + /* Use pid of -1 to tell to walk the whole map. */
3561 + vmcs_sm_host_walk_map_per_pid(-1);
3562 +
3563 + /* Done. */
3564 + goto out;
3565 + }
3566 + break;
3567 +
3568 +	/* Walk allocation per process on host. */
3569 + case VMCS_SM_CMD_HOST_WALK_PID_ALLOC:
3570 + {
3571 + struct vmcs_sm_ioctl_walk ioparam;
3572 +
3573 + /* Get parameter data. */
3574 + if (copy_from_user(&ioparam,
3575 + (void *)arg, sizeof(ioparam)) != 0) {
3576 + pr_err("[%s]: failed to copy-from-user for cmd %x\n",
3577 + __func__, cmdnr);
3578 + ret = -EFAULT;
3579 + goto out;
3580 + }
3581 +
3582 + vmcs_sm_host_walk_alloc(file_data);
3583 +
3584 + /* Done. */
3585 + goto out;
3586 + }
3587 + break;
3588 +
3589 +	/* Walk mapping table per process on host. */
3590 + case VMCS_SM_CMD_HOST_WALK_PID_MAP:
3591 + {
3592 + struct vmcs_sm_ioctl_walk ioparam;
3593 +
3594 + /* Get parameter data. */
3595 + if (copy_from_user(&ioparam,
3596 + (void *)arg, sizeof(ioparam)) != 0) {
3597 + pr_err("[%s]: failed to copy-from-user for cmd %x\n",
3598 + __func__, cmdnr);
3599 + ret = -EFAULT;
3600 + goto out;
3601 + }
3602 +
3603 + vmcs_sm_host_walk_map_per_pid(ioparam.pid);
3604 +
3605 + /* Done. */
3606 + goto out;
3607 + }
3608 + break;
3609 +
3610 + /* Gets the size of the memory associated with a user handle. */
3611 + case VMCS_SM_CMD_SIZE_USR_HANDLE:
3612 + {
3613 + struct vmcs_sm_ioctl_size ioparam;
3614 +
3615 + /* Get parameter data. */
3616 + if (copy_from_user(&ioparam,
3617 + (void *)arg, sizeof(ioparam)) != 0) {
3618 + pr_err("[%s]: failed to copy-from-user for cmd %x\n",
3619 + __func__, cmdnr);
3620 + ret = -EFAULT;
3621 + goto out;
3622 + }
3623 +
3624 + /* Locate resource from GUID. */
3625 + resource =
3626 + vmcs_sm_acquire_resource(file_data, ioparam.handle);
3627 + if (resource != NULL) {
3628 + ioparam.size = resource->res_size;
3629 + vmcs_sm_release_resource(resource, 0);
3630 + } else {
3631 + ioparam.size = 0;
3632 + }
3633 +
3634 + if (copy_to_user((void *)arg,
3635 + &ioparam, sizeof(ioparam)) != 0) {
3636 + pr_err("[%s]: failed to copy-to-user for cmd %x\n",
3637 + __func__, cmdnr);
3638 + ret = -EFAULT;
3639 + }
3640 +
3641 + /* Done. */
3642 + goto out;
3643 + }
3644 + break;
3645 +
3646 + /* Verify we are dealing with a valid resource. */
3647 + case VMCS_SM_CMD_CHK_USR_HANDLE:
3648 + {
3649 + struct vmcs_sm_ioctl_chk ioparam;
3650 +
3651 + /* Get parameter data. */
3652 + if (copy_from_user(&ioparam,
3653 + (void *)arg, sizeof(ioparam)) != 0) {
3654 + pr_err("[%s]: failed to copy-from-user for cmd %x\n",
3655 + __func__, cmdnr);
3656 +
3657 + ret = -EFAULT;
3658 + goto out;
3659 + }
3660 +
3661 + /* Locate resource from GUID. */
3662 + resource =
3663 + vmcs_sm_acquire_resource(file_data, ioparam.handle);
3664 + if (resource == NULL)
3665 + ret = -EINVAL;
3666 + /*
3667 + * If the resource is cacheable, return additional
3668 + * information that may be needed to flush the cache.
3669 + */
3670 + else if ((resource->res_cached == VMCS_SM_CACHE_HOST) ||
3671 + (resource->res_cached == VMCS_SM_CACHE_BOTH)) {
3672 + ioparam.addr =
3673 + vmcs_sm_usr_address_from_pid_and_usr_handle
3674 + (current->tgid, ioparam.handle);
3675 + ioparam.size = resource->res_size;
3676 + ioparam.cache = resource->res_cached;
3677 + } else {
3678 + ioparam.addr = 0;
3679 + ioparam.size = 0;
3680 + ioparam.cache = resource->res_cached;
3681 + }
3682 +
3683 + if (resource)
3684 + vmcs_sm_release_resource(resource, 0);
3685 +
3686 + if (copy_to_user((void *)arg,
3687 + &ioparam, sizeof(ioparam)) != 0) {
3688 + pr_err("[%s]: failed to copy-to-user for cmd %x\n",
3689 + __func__, cmdnr);
3690 + ret = -EFAULT;
3691 + }
3692 +
3693 + /* Done. */
3694 + goto out;
3695 + }
3696 + break;
3697 +
3698 + /*
3699 + * Maps a user handle given the process and the virtual address.
3700 + */
3701 + case VMCS_SM_CMD_MAPPED_USR_HANDLE:
3702 + {
3703 + struct vmcs_sm_ioctl_map ioparam;
3704 +
3705 + /* Get parameter data. */
3706 + if (copy_from_user(&ioparam,
3707 + (void *)arg, sizeof(ioparam)) != 0) {
3708 + pr_err("[%s]: failed to copy-from-user for cmd %x\n",
3709 + __func__, cmdnr);
3710 +
3711 + ret = -EFAULT;
3712 + goto out;
3713 + }
3714 +
3715 + ioparam.handle =
3716 + vmcs_sm_usr_handle_from_pid_and_address(
3717 + ioparam.pid, ioparam.addr);
3718 +
3719 + resource =
3720 + vmcs_sm_acquire_resource(file_data, ioparam.handle);
3721 + if ((resource != NULL)
3722 + && ((resource->res_cached == VMCS_SM_CACHE_HOST)
3723 + || (resource->res_cached ==
3724 + VMCS_SM_CACHE_BOTH))) {
3725 + ioparam.size = resource->res_size;
3726 + } else {
3727 + ioparam.size = 0;
3728 + }
3729 +
3730 + if (resource)
3731 + vmcs_sm_release_resource(resource, 0);
3732 +
3733 + if (copy_to_user((void *)arg,
3734 + &ioparam, sizeof(ioparam)) != 0) {
3735 + pr_err("[%s]: failed to copy-to-user for cmd %x\n",
3736 + __func__, cmdnr);
3737 + ret = -EFAULT;
3738 + }
3739 +
3740 + /* Done. */
3741 + goto out;
3742 + }
3743 + break;
3744 +
3745 + /*
3746 + * Maps a videocore handle given process and virtual address.
3747 + */
3748 + case VMCS_SM_CMD_MAPPED_VC_HDL_FROM_ADDR:
3749 + {
3750 + struct vmcs_sm_ioctl_map ioparam;
3751 +
3752 + /* Get parameter data. */
3753 + if (copy_from_user(&ioparam,
3754 + (void *)arg, sizeof(ioparam)) != 0) {
3755 + pr_err("[%s]: failed to copy-from-user for cmd %x\n",
3756 + __func__, cmdnr);
3757 + ret = -EFAULT;
3758 + goto out;
3759 + }
3760 +
3761 + ioparam.handle = vmcs_sm_vc_handle_from_pid_and_address(
3762 + ioparam.pid, ioparam.addr);
3763 +
3764 + if (copy_to_user((void *)arg,
3765 + &ioparam, sizeof(ioparam)) != 0) {
3766 + pr_err("[%s]: failed to copy-to-user for cmd %x\n",
3767 + __func__, cmdnr);
3768 +
3769 + ret = -EFAULT;
3770 + }
3771 +
3772 + /* Done. */
3773 + goto out;
3774 + }
3775 + break;
3776 +
3777 + /* Maps a videocore handle given process and user handle. */
3778 + case VMCS_SM_CMD_MAPPED_VC_HDL_FROM_HDL:
3779 + {
3780 + struct vmcs_sm_ioctl_map ioparam;
3781 +
3782 + /* Get parameter data. */
3783 + if (copy_from_user(&ioparam,
3784 + (void *)arg, sizeof(ioparam)) != 0) {
3785 + pr_err("[%s]: failed to copy-from-user for cmd %x\n",
3786 + __func__, cmdnr);
3787 + ret = -EFAULT;
3788 + goto out;
3789 + }
3790 +
3791 + /* Locate resource from GUID. */
3792 + resource =
3793 + vmcs_sm_acquire_resource(file_data, ioparam.handle);
3794 + if (resource != NULL) {
3795 + ioparam.handle = resource->res_handle;
3796 + vmcs_sm_release_resource(resource, 0);
3797 + } else {
3798 + ioparam.handle = 0;
3799 + }
3800 +
3801 + if (copy_to_user((void *)arg,
3802 + &ioparam, sizeof(ioparam)) != 0) {
3803 + pr_err("[%s]: failed to copy-to-user for cmd %x\n",
3804 + __func__, cmdnr);
3805 +
3806 + ret = -EFAULT;
3807 + }
3808 +
3809 + /* Done. */
3810 + goto out;
3811 + }
3812 + break;
3813 +
3814 + /*
3815 + * Maps a videocore address given process and videocore handle.
3816 + */
3817 + case VMCS_SM_CMD_MAPPED_VC_ADDR_FROM_HDL:
3818 + {
3819 + struct vmcs_sm_ioctl_map ioparam;
3820 +
3821 + /* Get parameter data. */
3822 + if (copy_from_user(&ioparam,
3823 + (void *)arg, sizeof(ioparam)) != 0) {
3824 + pr_err("[%s]: failed to copy-from-user for cmd %x\n",
3825 + __func__, cmdnr);
3826 +
3827 + ret = -EFAULT;
3828 + goto out;
3829 + }
3830 +
3831 + /* Locate resource from GUID. */
3832 + resource =
3833 + vmcs_sm_acquire_resource(file_data, ioparam.handle);
3834 + if (resource != NULL) {
3835 + ioparam.addr =
3836 + (unsigned int)resource->res_base_mem;
3837 + vmcs_sm_release_resource(resource, 0);
3838 + } else {
3839 + ioparam.addr = 0;
3840 + }
3841 +
3842 + if (copy_to_user((void *)arg,
3843 + &ioparam, sizeof(ioparam)) != 0) {
3844 + pr_err("[%s]: failed to copy-to-user for cmd %x\n",
3845 + __func__, cmdnr);
3846 + ret = -EFAULT;
3847 + }
3848 +
3849 + /* Done. */
3850 + goto out;
3851 + }
3852 + break;
3853 +
3854 + /* Maps a user address given process and vc handle. */
3855 + case VMCS_SM_CMD_MAPPED_USR_ADDRESS:
3856 + {
3857 + struct vmcs_sm_ioctl_map ioparam;
3858 +
3859 + /* Get parameter data. */
3860 + if (copy_from_user(&ioparam,
3861 + (void *)arg, sizeof(ioparam)) != 0) {
3862 + pr_err("[%s]: failed to copy-from-user for cmd %x\n",
3863 + __func__, cmdnr);
3864 + ret = -EFAULT;
3865 + goto out;
3866 + }
3867 +
3868 + /*
3869 + * Return the address information from the mapping,
3870 +			 * or 0 (i.e. NULL) if the mapping cannot be located.
3871 + */
3872 + ioparam.addr =
3873 + vmcs_sm_usr_address_from_pid_and_usr_handle
3874 + (ioparam.pid, ioparam.handle);
3875 +
3876 + if (copy_to_user((void *)arg,
3877 + &ioparam, sizeof(ioparam)) != 0) {
3878 + pr_err("[%s]: failed to copy-to-user for cmd %x\n",
3879 + __func__, cmdnr);
3880 + ret = -EFAULT;
3881 + }
3882 +
3883 + /* Done. */
3884 + goto out;
3885 + }
3886 + break;
3887 +
3888 + /* Flush the cache for a given mapping. */
3889 + case VMCS_SM_CMD_FLUSH:
3890 + {
3891 + struct vmcs_sm_ioctl_cache ioparam;
3892 +
3893 + /* Get parameter data. */
3894 + if (copy_from_user(&ioparam,
3895 + (void *)arg, sizeof(ioparam)) != 0) {
3896 + pr_err("[%s]: failed to copy-from-user for cmd %x\n",
3897 + __func__, cmdnr);
3898 + ret = -EFAULT;
3899 + goto out;
3900 + }
3901 +
3902 + /* Locate resource from GUID. */
3903 + resource =
3904 + vmcs_sm_acquire_resource(file_data, ioparam.handle);
3905 +
3906 + if ((resource != NULL) && resource->res_cached) {
3907 + dma_addr_t phys_addr = 0;
3908 +
3909 + resource->res_stats[FLUSH]++;
3910 +
3911 + phys_addr =
3912 + (dma_addr_t)((uint32_t)
3913 + resource->res_base_mem &
3914 + 0x3FFFFFFF);
3915 + phys_addr += (dma_addr_t)mm_vc_mem_phys_addr;
3916 +
3917 + /* L1 cache flush */
3918 + down_read(&current->mm->mmap_sem);
3919 + vcsm_vma_cache_clean_page_range((unsigned long)
3920 + ioparam.addr,
3921 + (unsigned long)
3922 + ioparam.addr +
3923 + ioparam.size);
3924 + up_read(&current->mm->mmap_sem);
3925 +
3926 + /* L2 cache flush */
3927 + outer_clean_range(phys_addr,
3928 + phys_addr +
3929 + (size_t) ioparam.size);
3930 + } else if (resource == NULL) {
3931 + ret = -EINVAL;
3932 + goto out;
3933 + }
3934 +
3935 + if (resource)
3936 + vmcs_sm_release_resource(resource, 0);
3937 +
3938 + /* Done. */
3939 + goto out;
3940 + }
3941 + break;
3942 +
3943 + /* Invalidate the cache for a given mapping. */
3944 + case VMCS_SM_CMD_INVALID:
3945 + {
3946 + struct vmcs_sm_ioctl_cache ioparam;
3947 +
3948 + /* Get parameter data. */
3949 + if (copy_from_user(&ioparam,
3950 + (void *)arg, sizeof(ioparam)) != 0) {
3951 + pr_err("[%s]: failed to copy-from-user for cmd %x\n",
3952 + __func__, cmdnr);
3953 + ret = -EFAULT;
3954 + goto out;
3955 + }
3956 +
3957 + /* Locate resource from GUID. */
3958 + resource =
3959 + vmcs_sm_acquire_resource(file_data, ioparam.handle);
3960 +
3961 + if ((resource != NULL) && resource->res_cached) {
3962 + dma_addr_t phys_addr = 0;
3963 +
3964 + resource->res_stats[INVALID]++;
3965 +
3966 + phys_addr =
3967 + (dma_addr_t)((uint32_t)
3968 + resource->res_base_mem &
3969 + 0x3FFFFFFF);
3970 + phys_addr += (dma_addr_t)mm_vc_mem_phys_addr;
3971 +
3972 + /* L2 cache invalidate */
3973 + outer_inv_range(phys_addr,
3974 + phys_addr +
3975 + (size_t) ioparam.size);
3976 +
3977 +				/* L1 cache (the helper performs a clean of the range) */
3978 + down_read(&current->mm->mmap_sem);
3979 + vcsm_vma_cache_clean_page_range((unsigned long)
3980 + ioparam.addr,
3981 + (unsigned long)
3982 + ioparam.addr +
3983 + ioparam.size);
3984 + up_read(&current->mm->mmap_sem);
3985 + } else if (resource == NULL) {
3986 + ret = -EINVAL;
3987 + goto out;
3988 + }
3989 +
3990 + if (resource)
3991 + vmcs_sm_release_resource(resource, 0);
3992 +
3993 + /* Done. */
3994 + goto out;
3995 + }
3996 + break;
3997 +
3998 + /* Flush/Invalidate the cache for a given mapping. */
3999 + case VMCS_SM_CMD_CLEAN_INVALID:
4000 + {
4001 + int i;
4002 + struct vmcs_sm_ioctl_clean_invalid ioparam;
4003 +
4004 + /* Get parameter data. */
4005 + if (copy_from_user(&ioparam,
4006 + (void *)arg, sizeof(ioparam)) != 0) {
4007 + pr_err("[%s]: failed to copy-from-user for cmd %x\n",
4008 + __func__, cmdnr);
4009 + ret = -EFAULT;
4010 + goto out;
4011 + }
4012 +		for (i = 0; i < ARRAY_SIZE(ioparam.s); i++) {
4013 + switch (ioparam.s[i].cmd) {
4014 +			case VCSM_CACHE_OP_INV:	/* L1/L2 invalidate */
4015 +			case VCSM_CACHE_OP_FLUSH:	/* L1/L2 clean+invalidate */
4016 +			case VCSM_CACHE_OP_CLEAN:	/* L1/L2 clean */
4017 + /* Locate resource from GUID. */
4018 + resource =
4019 + vmcs_sm_acquire_resource(file_data, ioparam.s[i].handle);
4020 +
4021 + if ((resource != NULL) && resource->res_cached) {
4022 +					unsigned long base = ioparam.s[i].addr & PAGE_MASK;
4023 +					unsigned long end = PAGE_ALIGN(ioparam.s[i].addr + ioparam.s[i].size);
4024 +
4025 +					resource->res_stats[ioparam.s[i].cmd == VCSM_CACHE_OP_INV ? INVALID : FLUSH]++;
4026 +
4027 +					/* L1/L2 cache: all three ops are serviced by a clean */
4028 + down_read(&current->mm->mmap_sem);
4029 + vcsm_vma_cache_clean_page_range(base, end);
4030 + up_read(&current->mm->mmap_sem);
4031 + } else if (resource == NULL) {
4032 + ret = -EINVAL;
4033 + goto out;
4034 + }
4035 +
4036 + if (resource)
4037 + vmcs_sm_release_resource(resource, 0);
4038 +
4039 + break;
4040 + default:
4041 + break; /* NOOP */
4042 + }
4043 + }
4044 + }
4045 + break;
4046 +	/* Clean/invalidate a list of 2D blocks (see vmcs_sm_ioctl_clean_invalid2). */
4047 + case VMCS_SM_CMD_CLEAN_INVALID2:
4048 + {
4049 + int i, j;
4050 + struct vmcs_sm_ioctl_clean_invalid2 ioparam;
4051 + struct vmcs_sm_ioctl_clean_invalid_block *block = NULL;
4052 +
4053 + /* Get parameter data. */
4054 + if (copy_from_user(&ioparam,
4055 + (void *)arg, sizeof(ioparam)) != 0) {
4056 + pr_err("[%s]: failed to copy-from-user header for cmd %x\n",
4057 + __func__, cmdnr);
4058 + ret = -EFAULT;
4059 + goto out;
4060 + }
4061 +		block = kmalloc_array(ioparam.op_count,
4062 +				      sizeof(*block),
4063 +				      GFP_KERNEL);
4064 +		if (!block) {
4065 +			ret = -ENOMEM;	/* allocation failure, not a fault */
4066 +			goto out;
4067 + }
4068 +		if (copy_from_user(block, (void *)(arg + sizeof(ioparam)),
4069 +				   ioparam.op_count * sizeof(*block)) != 0) {
4070 +			pr_err("[%s]: failed to copy-from-user payload for cmd %x\n", __func__, cmdnr);
4071 +			ret = -EFAULT;
4072 +			kfree(block);	/* don't leak the block list on error */
4073 +			goto out;
4074 +		}
4075 +
4076 + for (i = 0; i < ioparam.op_count; i++) {
4077 + const struct vmcs_sm_ioctl_clean_invalid_block * const op = block + i;
4078 + void (*op_fn)(const void *, const void *);
4079 +
4080 +			switch (op->invalidate_mode & 3) {
4081 + case VCSM_CACHE_OP_INV:
4082 + op_fn = dmac_inv_range;
4083 + break;
4084 + case VCSM_CACHE_OP_CLEAN:
4085 + op_fn = dmac_clean_range;
4086 + break;
4087 + case VCSM_CACHE_OP_FLUSH:
4088 + op_fn = dmac_flush_range;
4089 + break;
4090 + default:
4091 +				op_fn = NULL;
4092 + break;
4093 + }
4094 +
4095 + if ((op->invalidate_mode & ~3) != 0) {
4096 + ret = -EINVAL;
4097 + break;
4098 + }
4099 +
4100 +			if (!op_fn)
4101 + continue;
4102 +
4103 + for (j = 0; j < op->block_count; ++j) {
4104 + const char * const base = (const char *)op->start_address + j * op->inter_block_stride;
4105 + const char * const end = base + op->block_size;
4106 + op_fn(base, end);
4107 + }
4108 + }
4109 + kfree(block);
4110 + }
4111 + break;
4112 +
4113 + default:
4114 + {
4115 + ret = -EINVAL;
4116 + goto out;
4117 + }
4118 + break;
4119 + }
4120 +
4121 +out:
4122 + return ret;
4123 +}
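
Every command above follows the same round trip: copy_from_user() the parameter block, act on it, and, for query commands, copy_to_user() the result back into the same block. A minimal user-space sketch of that contract for VMCS_SM_CMD_SIZE_USR_HANDLE, assuming the device has already been opened (the node name is defined elsewhere in this patch):

#include <sys/ioctl.h>
#include <linux/broadcom/vmcs_sm_ioctl.h>

/* Sketch: query the size backing a user handle; 0 on any failure. */
static unsigned int query_size(int fd, unsigned int handle)
{
	struct vmcs_sm_ioctl_size io = { .handle = handle };

	/* The handler copies this in, fills .size and copies it back. */
	if (ioctl(fd, VMCS_SM_IOCTL_SIZE_USR_HDL, &io) < 0)
		return 0;
	return io.size;
}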
4124 +
4125 +/* Device operations that we manage in this driver. */
4126 +static const struct file_operations vmcs_sm_ops = {
4127 + .owner = THIS_MODULE,
4128 + .unlocked_ioctl = vc_sm_ioctl,
4129 + .open = vc_sm_open,
4130 + .release = vc_sm_release,
4131 + .mmap = vc_sm_mmap,
4132 +};
4133 +
4134 +/* Creation of device. */
4135 +static int vc_sm_create_sharedmemory(void)
4136 +{
4137 + int ret;
4138 +
4139 + if (sm_state == NULL) {
4140 + ret = -ENOMEM;
4141 + goto out;
4142 + }
4143 +
4144 + /* Create a device class for creating dev nodes. */
4145 + sm_state->sm_class = class_create(THIS_MODULE, "vc-sm");
4146 + if (IS_ERR(sm_state->sm_class)) {
4147 + pr_err("[%s]: unable to create device class\n", __func__);
4148 + ret = PTR_ERR(sm_state->sm_class);
4149 + goto out;
4150 + }
4151 +
4152 + /* Create a character driver. */
4153 + ret = alloc_chrdev_region(&sm_state->sm_devid,
4154 + DEVICE_MINOR, 1, DEVICE_NAME);
4155 + if (ret != 0) {
4156 + pr_err("[%s]: unable to allocate device number\n", __func__);
4157 + goto out_dev_class_destroy;
4158 + }
4159 +
4160 + cdev_init(&sm_state->sm_cdev, &vmcs_sm_ops);
4161 + ret = cdev_add(&sm_state->sm_cdev, sm_state->sm_devid, 1);
4162 + if (ret != 0) {
4163 + pr_err("[%s]: unable to register device\n", __func__);
4164 + goto out_chrdev_unreg;
4165 + }
4166 +
4167 + /* Create a device node. */
4168 + sm_state->sm_dev = device_create(sm_state->sm_class,
4169 + NULL,
4170 + MKDEV(MAJOR(sm_state->sm_devid),
4171 + DEVICE_MINOR), NULL,
4172 + DEVICE_NAME);
4173 + if (IS_ERR(sm_state->sm_dev)) {
4174 + pr_err("[%s]: unable to create device node\n", __func__);
4175 + ret = PTR_ERR(sm_state->sm_dev);
4176 + goto out_chrdev_del;
4177 + }
4178 +
4179 + goto out;
4180 +
4181 +out_chrdev_del:
4182 + cdev_del(&sm_state->sm_cdev);
4183 +out_chrdev_unreg:
4184 + unregister_chrdev_region(sm_state->sm_devid, 1);
4185 +out_dev_class_destroy:
4186 + class_destroy(sm_state->sm_class);
4187 + sm_state->sm_class = NULL;
4188 +out:
4189 + return ret;
4190 +}
4191 +
4192 +/* Termination of the device. */
4193 +static int vc_sm_remove_sharedmemory(void)
4194 +{
4195 + int ret;
4196 +
4197 + if (sm_state == NULL) {
4198 + /* Nothing to do. */
4199 + ret = 0;
4200 + goto out;
4201 + }
4202 +
4203 + /* Remove the sharedmemory character driver. */
4204 + cdev_del(&sm_state->sm_cdev);
4205 +
4206 + /* Unregister region. */
4207 + unregister_chrdev_region(sm_state->sm_devid, 1);
4208 +
4209 + ret = 0;
4210 + goto out;
4211 +
4212 +out:
4213 + return ret;
4214 +}
4215 +
4216 +/* Videocore connected. */
4217 +static void vc_sm_connected_init(void)
4218 +{
4219 + int ret;
4220 + VCHI_INSTANCE_T vchi_instance;
4221 + VCHI_CONNECTION_T *vchi_connection = NULL;
4222 +
4223 + pr_info("[%s]: start\n", __func__);
4224 +
4225 + /*
4226 + * Initialize and create a VCHI connection for the shared memory service
4227 + * running on videocore.
4228 + */
4229 + ret = vchi_initialise(&vchi_instance);
4230 + if (ret != 0) {
4231 + pr_err("[%s]: failed to initialise VCHI instance (ret=%d)\n",
4232 + __func__, ret);
4233 +
4234 + ret = -EIO;
4235 + goto err_free_mem;
4236 + }
4237 +
4238 + ret = vchi_connect(NULL, 0, vchi_instance);
4239 + if (ret != 0) {
4240 + pr_err("[%s]: failed to connect VCHI instance (ret=%d)\n",
4241 + __func__, ret);
4242 +
4243 + ret = -EIO;
4244 + goto err_free_mem;
4245 + }
4246 +
4247 + /* Initialize an instance of the shared memory service. */
4248 + sm_state->sm_handle =
4249 + vc_vchi_sm_init(vchi_instance, &vchi_connection, 1);
4250 + if (sm_state->sm_handle == NULL) {
4251 + pr_err("[%s]: failed to initialize shared memory service\n",
4252 + __func__);
4253 +
4254 + ret = -EPERM;
4255 + goto err_free_mem;
4256 + }
4257 +
4258 + /* Create a debug fs directory entry (root). */
4259 + sm_state->dir_root = debugfs_create_dir(VC_SM_DIR_ROOT_NAME, NULL);
4260 + if (!sm_state->dir_root) {
4261 +		pr_err("[%s]: failed to create '%s' directory entry\n",
4262 + __func__, VC_SM_DIR_ROOT_NAME);
4263 +
4264 + ret = -EPERM;
4265 + goto err_stop_sm_service;
4266 + }
4267 +
4268 + sm_state->dir_state.show = &vc_sm_global_state_show;
4269 + sm_state->dir_state.dir_entry = debugfs_create_file(VC_SM_STATE,
4270 + 0444, sm_state->dir_root, &sm_state->dir_state,
4271 + &vc_sm_debug_fs_fops);
4272 +
4273 + sm_state->dir_stats.show = &vc_sm_global_statistics_show;
4274 + sm_state->dir_stats.dir_entry = debugfs_create_file(VC_SM_STATS,
4275 + 0444, sm_state->dir_root, &sm_state->dir_stats,
4276 + &vc_sm_debug_fs_fops);
4277 +
4278 +	/* Create the debugfs entry children. */
4279 + sm_state->dir_alloc = debugfs_create_dir(VC_SM_DIR_ALLOC_NAME,
4280 + sm_state->dir_root);
4281 +
4282 + /* Create a shared memory device. */
4283 + ret = vc_sm_create_sharedmemory();
4284 + if (ret != 0) {
4285 + pr_err("[%s]: failed to create shared memory device\n",
4286 + __func__);
4287 + goto err_remove_debugfs;
4288 + }
4289 +
4290 + INIT_LIST_HEAD(&sm_state->map_list);
4291 + INIT_LIST_HEAD(&sm_state->resource_list);
4292 +
4293 + sm_state->data_knl = vc_sm_create_priv_data(0);
4294 + if (sm_state->data_knl == NULL) {
4295 +		pr_err("[%s]: failed to create kernel private data tracker\n",
4296 +			__func__);
4297 +		ret = -ENOMEM;	/* otherwise the log below reports success */
4298 +		goto err_remove_shared_memory;
4298 + }
4299 +
4300 + /* Done! */
4301 + sm_inited = 1;
4302 + goto out;
4303 +
4304 +err_remove_shared_memory:
4305 + vc_sm_remove_sharedmemory();
4306 +err_remove_debugfs:
4307 + debugfs_remove_recursive(sm_state->dir_root);
4308 +err_stop_sm_service:
4309 + vc_vchi_sm_stop(&sm_state->sm_handle);
4310 +err_free_mem:
4311 + kfree(sm_state);
4312 +out:
4313 + pr_info("[%s]: end - returning %d\n", __func__, ret);
4314 +}
4315 +
4316 +/* Driver loading. */
4317 +static int bcm2835_vcsm_probe(struct platform_device *pdev)
4318 +{
4319 + pr_info("vc-sm: Videocore shared memory driver\n");
4320 +
4321 + sm_state = kzalloc(sizeof(*sm_state), GFP_KERNEL);
4322 + if (!sm_state)
4323 + return -ENOMEM;
4324 + sm_state->pdev = pdev;
4325 + mutex_init(&sm_state->lock);
4326 + mutex_init(&sm_state->map_lock);
4327 +
4328 + vchiq_add_connected_callback(vc_sm_connected_init);
4329 + return 0;
4330 +}
4331 +
4332 +/* Driver unloading. */
4333 +static int bcm2835_vcsm_remove(struct platform_device *pdev)
4334 +{
4335 + pr_debug("[%s]: start\n", __func__);
4336 + if (sm_inited) {
4337 + /* Remove shared memory device. */
4338 + vc_sm_remove_sharedmemory();
4339 +
4340 +		/* Remove all debugfs entries. */
4341 + debugfs_remove_recursive(sm_state->dir_root);
4342 +
4343 + /* Stop the videocore shared memory service. */
4344 + vc_vchi_sm_stop(&sm_state->sm_handle);
4345 +
4346 + /* Free the memory for the state structure. */
4347 + mutex_destroy(&(sm_state->map_lock));
4348 + kfree(sm_state);
4349 + }
4350 +
4351 + pr_debug("[%s]: end\n", __func__);
4352 + return 0;
4353 +}
4354 +
4355 +#if defined(__KERNEL__)
4356 +/* Allocate a shared memory handle and block. */
4357 +int vc_sm_alloc(struct vc_sm_alloc_t *alloc, int *handle)
4358 +{
4359 + struct vmcs_sm_ioctl_alloc ioparam = { 0 };
4360 + int ret;
4361 + struct sm_resource_t *resource;
4362 +
4363 + /* Validate we can work with this device. */
4364 + if (sm_state == NULL || alloc == NULL || handle == NULL) {
4365 + pr_err("[%s]: invalid input\n", __func__);
4366 + return -EPERM;
4367 + }
4368 +
4369 + ioparam.size = alloc->base_unit;
4370 + ioparam.num = alloc->num_unit;
4371 + ioparam.cached =
4372 + alloc->type == VC_SM_ALLOC_CACHED ? VMCS_SM_CACHE_VC : 0;
4373 +
4374 + ret = vc_sm_ioctl_alloc(sm_state->data_knl, &ioparam);
4375 +
4376 + if (ret == 0) {
4377 + resource =
4378 + vmcs_sm_acquire_resource(sm_state->data_knl,
4379 + ioparam.handle);
4380 + if (resource) {
4381 + resource->pid = 0;
4382 + vmcs_sm_release_resource(resource, 0);
4383 +
4384 + /* Assign valid handle at this time. */
4385 + *handle = ioparam.handle;
4386 + } else {
4387 + ret = -ENOMEM;
4388 + }
4389 + }
4390 +
4391 + return ret;
4392 +}
4393 +EXPORT_SYMBOL_GPL(vc_sm_alloc);
4394 +
4395 +/* Get an internal resource handle mapped from the external one. */
4396 +int vc_sm_int_handle(int handle)
4397 +{
4398 + struct sm_resource_t *resource;
4399 + int ret = 0;
4400 +
4401 + /* Validate we can work with this device. */
4402 + if (sm_state == NULL || handle == 0) {
4403 + pr_err("[%s]: invalid input\n", __func__);
4404 + return 0;
4405 + }
4406 +
4407 + /* Locate resource from GUID. */
4408 + resource = vmcs_sm_acquire_resource(sm_state->data_knl, handle);
4409 + if (resource) {
4410 + ret = resource->res_handle;
4411 + vmcs_sm_release_resource(resource, 0);
4412 + }
4413 +
4414 + return ret;
4415 +}
4416 +EXPORT_SYMBOL_GPL(vc_sm_int_handle);
4417 +
4418 +/* Free a previously allocated shared memory handle and block. */
4419 +int vc_sm_free(int handle)
4420 +{
4421 + struct vmcs_sm_ioctl_free ioparam = { handle };
4422 +
4423 + /* Validate we can work with this device. */
4424 + if (sm_state == NULL || handle == 0) {
4425 + pr_err("[%s]: invalid input\n", __func__);
4426 + return -EPERM;
4427 + }
4428 +
4429 + return vc_sm_ioctl_free(sm_state->data_knl, &ioparam);
4430 +}
4431 +EXPORT_SYMBOL_GPL(vc_sm_free);
4432 +
4433 +/* Lock a memory handle for use by kernel. */
4434 +int vc_sm_lock(int handle, enum vc_sm_lock_cache_mode mode,
4435 + unsigned long *data)
4436 +{
4437 + struct vmcs_sm_ioctl_lock_unlock ioparam;
4438 + int ret;
4439 +
4440 + /* Validate we can work with this device. */
4441 + if (sm_state == NULL || handle == 0 || data == NULL) {
4442 + pr_err("[%s]: invalid input\n", __func__);
4443 + return -EPERM;
4444 + }
4445 +
4446 + *data = 0;
4447 +
4448 + ioparam.handle = handle;
4449 + ret = vc_sm_ioctl_lock(sm_state->data_knl,
4450 + &ioparam,
4451 + 1,
4452 + ((mode ==
4453 + VC_SM_LOCK_CACHED) ? VMCS_SM_CACHE_HOST :
4454 + VMCS_SM_CACHE_NONE), 0);
4455 +
4456 + *data = ioparam.addr;
4457 + return ret;
4458 +}
4459 +EXPORT_SYMBOL_GPL(vc_sm_lock);
4460 +
4461 +/* Unlock a memory handle in use by kernel. */
4462 +int vc_sm_unlock(int handle, int flush, int no_vc_unlock)
4463 +{
4464 + struct vmcs_sm_ioctl_lock_unlock ioparam;
4465 +
4466 + /* Validate we can work with this device. */
4467 + if (sm_state == NULL || handle == 0) {
4468 + pr_err("[%s]: invalid input\n", __func__);
4469 + return -EPERM;
4470 + }
4471 +
4472 + ioparam.handle = handle;
4473 + return vc_sm_ioctl_unlock(sm_state->data_knl,
4474 + &ioparam, flush, 0, no_vc_unlock);
4475 +}
4476 +EXPORT_SYMBOL_GPL(vc_sm_unlock);
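
The exported helpers above (vc_sm_alloc(), vc_sm_int_handle(), vc_sm_free(), vc_sm_lock(), vc_sm_unlock()) give other kernel code access to the relocatable heap without going through the character device. A minimal sketch of the intended call sequence; VC_SM_ALLOC_NON_CACHED and VC_SM_LOCK_NON_CACHED are assumed to be the uncached counterparts of the cached names used in this hunk, all declared in vc_sm_knl.h:

#include <linux/string.h>
#include "vc_sm_knl.h"

/* Sketch: allocate one 4 KiB unit, lock it for CPU use, release it. */
static int vc_sm_example(void)
{
	struct vc_sm_alloc_t alloc = {
		.type = VC_SM_ALLOC_NON_CACHED,	/* assumed name */
		.base_unit = 4096,	/* bytes per unit */
		.num_unit = 1,		/* number of units */
	};
	unsigned long addr;
	int handle, ret;

	ret = vc_sm_alloc(&alloc, &handle);
	if (ret)
		return ret;

	ret = vc_sm_lock(handle, VC_SM_LOCK_NON_CACHED, &addr);	/* assumed name */
	if (ret == 0) {
		memset((void *)addr, 0, 4096);
		vc_sm_unlock(handle, 1 /* flush */, 0 /* also unlock on VC */);
	}

	return vc_sm_free(handle);
}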
4477 +
4478 +/* Map a shared memory region for use by kernel. */
4479 +int vc_sm_map(int handle, unsigned int sm_addr,
4480 + enum vc_sm_lock_cache_mode mode, unsigned long *data)
4481 +{
4482 + struct vmcs_sm_ioctl_lock_unlock ioparam;
4483 + int ret;
4484 +
4485 + /* Validate we can work with this device. */
4486 + if (sm_state == NULL || handle == 0 || data == NULL || sm_addr == 0) {
4487 + pr_err("[%s]: invalid input\n", __func__);
4488 + return -EPERM;
4489 + }
4490 +
4491 + *data = 0;
4492 +
4493 + ioparam.handle = handle;
4494 + ret = vc_sm_ioctl_lock(sm_state->data_knl,
4495 + &ioparam,
4496 + 1,
4497 + ((mode ==
4498 + VC_SM_LOCK_CACHED) ? VMCS_SM_CACHE_HOST :
4499 + VMCS_SM_CACHE_NONE), sm_addr);
4500 +
4501 + *data = ioparam.addr;
4502 + return ret;
4503 +}
4504 +EXPORT_SYMBOL_GPL(vc_sm_map);
4505 +
4506 +/* Import a dmabuf to be shared with VC. */
4507 +int vc_sm_import_dmabuf(struct dma_buf *dmabuf, int *handle)
4508 +{
4509 + struct vmcs_sm_ioctl_import_dmabuf ioparam = { 0 };
4510 + int ret;
4511 + struct sm_resource_t *resource;
4512 +
4513 + /* Validate we can work with this device. */
4514 + if (!sm_state || !dmabuf || !handle) {
4515 + pr_err("[%s]: invalid input\n", __func__);
4516 + return -EPERM;
4517 + }
4518 +
4519 + ioparam.cached = 0;
4520 + strcpy(ioparam.name, "KRNL DMABUF");
4521 +
4522 + ret = vc_sm_ioctl_import_dmabuf(sm_state->data_knl, &ioparam, dmabuf);
4523 +
4524 + if (!ret) {
4525 + resource = vmcs_sm_acquire_resource(sm_state->data_knl,
4526 + ioparam.handle);
4527 + if (resource) {
4528 + resource->pid = 0;
4529 + vmcs_sm_release_resource(resource, 0);
4530 +
4531 +			/* Assign valid handle at this time. */
4532 + *handle = ioparam.handle;
4533 + } else {
4534 + ret = -ENOMEM;
4535 + }
4536 + }
4537 +
4538 + return ret;
4539 +}
4540 +EXPORT_SYMBOL_GPL(vc_sm_import_dmabuf);
4541 +#endif
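
vc_sm_import_dmabuf() takes an already-resolved struct dma_buf rather than a file descriptor, so an in-kernel caller bridging from user space would pair it with dma_buf_get(). A hedged sketch; whether the import takes over the reference on success, and who drops it on failure, depends on vc_sm_ioctl_import_dmabuf(), which lives outside this hunk:

#include <linux/dma-buf.h>
#include <linux/err.h>

/* Sketch: wrap a user-supplied dmabuf fd into a VideoCore handle. */
static int import_fd_for_vpu(int dmabuf_fd, int *vc_handle)
{
	struct dma_buf *dmabuf = dma_buf_get(dmabuf_fd);
	int ret;

	if (IS_ERR(dmabuf))
		return PTR_ERR(dmabuf);

	ret = vc_sm_import_dmabuf(dmabuf, vc_handle);
	if (ret)
		dma_buf_put(dmabuf);	/* assumed: drop our ref on failure */
	return ret;
}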
4542 +
4543 +/*
4544 + * Register the driver with device tree
4545 + */
4546 +
4547 +static const struct of_device_id bcm2835_vcsm_of_match[] = {
4548 + {.compatible = "raspberrypi,bcm2835-vcsm",},
4549 + { /* sentinel */ },
4550 +};
4551 +
4552 +MODULE_DEVICE_TABLE(of, bcm2835_vcsm_of_match);
4553 +
4554 +static struct platform_driver bcm2835_vcsm_driver = {
4555 + .probe = bcm2835_vcsm_probe,
4556 + .remove = bcm2835_vcsm_remove,
4557 + .driver = {
4558 + .name = DRIVER_NAME,
4559 + .owner = THIS_MODULE,
4560 + .of_match_table = bcm2835_vcsm_of_match,
4561 + },
4562 +};
4563 +
4564 +module_platform_driver(bcm2835_vcsm_driver);
4565 +
4566 +MODULE_AUTHOR("Broadcom");
4567 +MODULE_DESCRIPTION("VideoCore SharedMemory Driver");
4568 +MODULE_LICENSE("GPL v2");
4569 --- /dev/null
4570 +++ b/include/linux/broadcom/vmcs_sm_ioctl.h
4571 @@ -0,0 +1,280 @@
4572 +/*****************************************************************************
4573 +* Copyright 2011 Broadcom Corporation. All rights reserved.
4574 +*
4575 +* Unless you and Broadcom execute a separate written software license
4576 +* agreement governing use of this software, this software is licensed to you
4577 +* under the terms of the GNU General Public License version 2, available at
4578 +* http://www.broadcom.com/licenses/GPLv2.php (the "GPL").
4579 +*
4580 +* Notwithstanding the above, under no circumstances may you combine this
4581 +* software in any way with any other Broadcom software provided under a
4582 +* license other than the GPL, without Broadcom's express prior written
4583 +* consent.
4584 +*
4585 +*****************************************************************************/
4586 +
4587 +#if !defined(__VMCS_SM_IOCTL_H__INCLUDED__)
4588 +#define __VMCS_SM_IOCTL_H__INCLUDED__
4589 +
4590 +/* ---- Include Files ---------------------------------------------------- */
4591 +
4592 +#if defined(__KERNEL__)
4593 +#include <linux/types.h> /* Needed for standard types */
4594 +#else
4595 +#include <stdint.h>
4596 +#endif
4597 +
4598 +#include <linux/ioctl.h>
4599 +
4600 +/* ---- Constants and Types ---------------------------------------------- */
4601 +
4602 +#define VMCS_SM_RESOURCE_NAME 32
4603 +#define VMCS_SM_RESOURCE_NAME_DEFAULT "sm-host-resource"
4604 +
4605 +/* Magic type used to create unique ioctl numbers */
4606 +#define VMCS_SM_MAGIC_TYPE 'I'
4607 +
4608 +/* IOCTL commands */
4609 +enum vmcs_sm_cmd_e {
4610 + VMCS_SM_CMD_ALLOC = 0x5A, /* Start at 0x5A arbitrarily */
4611 + VMCS_SM_CMD_ALLOC_SHARE,
4612 + VMCS_SM_CMD_LOCK,
4613 + VMCS_SM_CMD_LOCK_CACHE,
4614 + VMCS_SM_CMD_UNLOCK,
4615 + VMCS_SM_CMD_RESIZE,
4616 + VMCS_SM_CMD_UNMAP,
4617 + VMCS_SM_CMD_FREE,
4618 + VMCS_SM_CMD_FLUSH,
4619 + VMCS_SM_CMD_INVALID,
4620 +
4621 + VMCS_SM_CMD_SIZE_USR_HANDLE,
4622 + VMCS_SM_CMD_CHK_USR_HANDLE,
4623 +
4624 + VMCS_SM_CMD_MAPPED_USR_HANDLE,
4625 + VMCS_SM_CMD_MAPPED_USR_ADDRESS,
4626 + VMCS_SM_CMD_MAPPED_VC_HDL_FROM_ADDR,
4627 + VMCS_SM_CMD_MAPPED_VC_HDL_FROM_HDL,
4628 + VMCS_SM_CMD_MAPPED_VC_ADDR_FROM_HDL,
4629 +
4630 + VMCS_SM_CMD_VC_WALK_ALLOC,
4631 + VMCS_SM_CMD_HOST_WALK_MAP,
4632 + VMCS_SM_CMD_HOST_WALK_PID_ALLOC,
4633 + VMCS_SM_CMD_HOST_WALK_PID_MAP,
4634 +
4635 + VMCS_SM_CMD_CLEAN_INVALID,
4636 + VMCS_SM_CMD_CLEAN_INVALID2,
4637 +
4638 + VMCS_SM_CMD_IMPORT_DMABUF,
4639 +
4640 + VMCS_SM_CMD_LAST /* Do not delete */
4641 +};
4642 +
4643 +/* Supported cache types; deliberately matches the user-space definition
4644 + * in user-vcsm.h.
4645 + */
4646 +enum vmcs_sm_cache_e {
4647 + VMCS_SM_CACHE_NONE,
4648 + VMCS_SM_CACHE_HOST,
4649 + VMCS_SM_CACHE_VC,
4650 + VMCS_SM_CACHE_BOTH,
4651 +};
4652 +
4653 +/* IOCTL Data structures */
4654 +struct vmcs_sm_ioctl_alloc {
4655 + /* user -> kernel */
4656 + unsigned int size;
4657 + unsigned int num;
4658 + enum vmcs_sm_cache_e cached;
4659 + char name[VMCS_SM_RESOURCE_NAME];
4660 +
4661 + /* kernel -> user */
4662 + unsigned int handle;
4663 + /* unsigned int base_addr; */
4664 +};
4665 +
4666 +struct vmcs_sm_ioctl_alloc_share {
4667 + /* user -> kernel */
4668 + unsigned int handle;
4669 + unsigned int size;
4670 +};
4671 +
4672 +struct vmcs_sm_ioctl_free {
4673 + /* user -> kernel */
4674 + unsigned int handle;
4675 + /* unsigned int base_addr; */
4676 +};
4677 +
4678 +struct vmcs_sm_ioctl_lock_unlock {
4679 + /* user -> kernel */
4680 + unsigned int handle;
4681 +
4682 + /* kernel -> user */
4683 + unsigned int addr;
4684 +};
4685 +
4686 +struct vmcs_sm_ioctl_lock_cache {
4687 + /* user -> kernel */
4688 + unsigned int handle;
4689 + enum vmcs_sm_cache_e cached;
4690 +};
4691 +
4692 +struct vmcs_sm_ioctl_resize {
4693 + /* user -> kernel */
4694 + unsigned int handle;
4695 + unsigned int new_size;
4696 +
4697 + /* kernel -> user */
4698 + unsigned int old_size;
4699 +};
4700 +
4701 +struct vmcs_sm_ioctl_map {
4702 + /* user -> kernel */
4703 + /* and kernel -> user */
4704 + unsigned int pid;
4705 + unsigned int handle;
4706 + unsigned int addr;
4707 +
4708 + /* kernel -> user */
4709 + unsigned int size;
4710 +};
4711 +
4712 +struct vmcs_sm_ioctl_walk {
4713 + /* user -> kernel */
4714 + unsigned int pid;
4715 +};
4716 +
4717 +struct vmcs_sm_ioctl_chk {
4718 + /* user -> kernel */
4719 + unsigned int handle;
4720 +
4721 + /* kernel -> user */
4722 + unsigned int addr;
4723 + unsigned int size;
4724 + enum vmcs_sm_cache_e cache;
4725 +};
4726 +
4727 +struct vmcs_sm_ioctl_size {
4728 + /* user -> kernel */
4729 + unsigned int handle;
4730 +
4731 + /* kernel -> user */
4732 + unsigned int size;
4733 +};
4734 +
4735 +struct vmcs_sm_ioctl_cache {
4736 + /* user -> kernel */
4737 + unsigned int handle;
4738 + unsigned int addr;
4739 + unsigned int size;
4740 +};
4741 +
4742 +struct vmcs_sm_ioctl_clean_invalid {
4743 + /* user -> kernel */
4744 + struct {
4745 + unsigned int cmd;
4746 + unsigned int handle;
4747 + unsigned int addr;
4748 + unsigned int size;
4749 + } s[8];
4750 +};
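
Each of the eight slots describes one cache operation, and the handler ignores any slot whose cmd it does not recognise, so unused slots can simply stay zeroed. A user-space sketch, assuming the VCSM_CACHE_OP_* values the driver switches on are 0/1/2/3 for NOP/INV/CLEAN/FLUSH (they are defined in vmcs_sm.c, not in this header):

#include <stdint.h>
#include <sys/ioctl.h>

/* Sketch: flush one locked buffer via the batch ioctl. */
static int flush_one(int fd, unsigned int handle,
		     void *addr, unsigned int size)
{
	struct vmcs_sm_ioctl_clean_invalid io = { 0 };

	io.s[0].cmd = 3;	/* assumed value of VCSM_CACHE_OP_FLUSH */
	io.s[0].handle = handle;
	io.s[0].addr = (unsigned int)(uintptr_t)addr;
	io.s[0].size = size;
	/* Slots 1..7 stay zeroed and are skipped by the kernel loop. */

	return ioctl(fd, VMCS_SM_IOCTL_MEM_CLEAN_INVALID, &io);
}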
4751 +
4752 +struct vmcs_sm_ioctl_clean_invalid2 {
4753 + uint8_t op_count;
4754 + uint8_t zero[3];
4755 + struct vmcs_sm_ioctl_clean_invalid_block {
4756 + uint16_t invalidate_mode;
4757 + uint16_t block_count;
4758 +		void *start_address;
4759 + uint32_t block_size;
4760 + uint32_t inter_block_stride;
4761 + } s[0];
4762 +};
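
Because s[] is a zero-length trailing array, the caller allocates the header and the operation blocks as one contiguous buffer; the handler reads op_count from the header, then copies that many blocks from just past it. A sketch of a single strided 2D operation; the invalidate_mode value 3 for clean+invalidate is an assumption mirroring the dmac_flush_range case in the handler:

#include <stdint.h>
#include <stdlib.h>
#include <sys/ioctl.h>

/* Sketch: clean+invalidate rows of row_bytes, spaced pitch apart. */
static int clean_invalid_2d(int fd, void *start, uint16_t rows,
			    uint32_t row_bytes, uint32_t pitch)
{
	size_t len = sizeof(struct vmcs_sm_ioctl_clean_invalid2) +
		     sizeof(struct vmcs_sm_ioctl_clean_invalid_block);
	struct vmcs_sm_ioctl_clean_invalid2 *io = calloc(1, len);
	int ret;

	if (!io)
		return -1;

	io->op_count = 1;
	io->s[0].invalidate_mode = 3;	/* assumed: clean+invalidate */
	io->s[0].block_count = rows;
	io->s[0].start_address = start;
	io->s[0].block_size = row_bytes;
	io->s[0].inter_block_stride = pitch;

	ret = ioctl(fd, VMCS_SM_IOCTL_MEM_CLEAN_INVALID2, io);
	free(io);
	return ret;
}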
4763 +
4764 +struct vmcs_sm_ioctl_import_dmabuf {
4765 + /* user -> kernel */
4766 + int dmabuf_fd;
4767 + enum vmcs_sm_cache_e cached;
4768 + char name[VMCS_SM_RESOURCE_NAME];
4769 +
4770 + /* kernel -> user */
4771 + unsigned int handle;
4772 +};
4773 +
4774 +/* IOCTL numbers */
4775 +#define VMCS_SM_IOCTL_MEM_ALLOC\
4776 + _IOR(VMCS_SM_MAGIC_TYPE, VMCS_SM_CMD_ALLOC,\
4777 + struct vmcs_sm_ioctl_alloc)
4778 +#define VMCS_SM_IOCTL_MEM_ALLOC_SHARE\
4779 + _IOR(VMCS_SM_MAGIC_TYPE, VMCS_SM_CMD_ALLOC_SHARE,\
4780 + struct vmcs_sm_ioctl_alloc_share)
4781 +#define VMCS_SM_IOCTL_MEM_LOCK\
4782 + _IOR(VMCS_SM_MAGIC_TYPE, VMCS_SM_CMD_LOCK,\
4783 + struct vmcs_sm_ioctl_lock_unlock)
4784 +#define VMCS_SM_IOCTL_MEM_LOCK_CACHE\
4785 + _IOR(VMCS_SM_MAGIC_TYPE, VMCS_SM_CMD_LOCK_CACHE,\
4786 + struct vmcs_sm_ioctl_lock_cache)
4787 +#define VMCS_SM_IOCTL_MEM_UNLOCK\
4788 + _IOR(VMCS_SM_MAGIC_TYPE, VMCS_SM_CMD_UNLOCK,\
4789 + struct vmcs_sm_ioctl_lock_unlock)
4790 +#define VMCS_SM_IOCTL_MEM_RESIZE\
4791 + _IOR(VMCS_SM_MAGIC_TYPE, VMCS_SM_CMD_RESIZE,\
4792 + struct vmcs_sm_ioctl_resize)
4793 +#define VMCS_SM_IOCTL_MEM_FREE\
4794 + _IOR(VMCS_SM_MAGIC_TYPE, VMCS_SM_CMD_FREE,\
4795 + struct vmcs_sm_ioctl_free)
4796 +#define VMCS_SM_IOCTL_MEM_FLUSH\
4797 + _IOR(VMCS_SM_MAGIC_TYPE, VMCS_SM_CMD_FLUSH,\
4798 + struct vmcs_sm_ioctl_cache)
4799 +#define VMCS_SM_IOCTL_MEM_INVALID\
4800 + _IOR(VMCS_SM_MAGIC_TYPE, VMCS_SM_CMD_INVALID,\
4801 + struct vmcs_sm_ioctl_cache)
4802 +#define VMCS_SM_IOCTL_MEM_CLEAN_INVALID\
4803 + _IOR(VMCS_SM_MAGIC_TYPE, VMCS_SM_CMD_CLEAN_INVALID,\
4804 + struct vmcs_sm_ioctl_clean_invalid)
4805 +#define VMCS_SM_IOCTL_MEM_CLEAN_INVALID2\
4806 + _IOR(VMCS_SM_MAGIC_TYPE, VMCS_SM_CMD_CLEAN_INVALID2,\
4807 + struct vmcs_sm_ioctl_clean_invalid2)
4808 +
4809 +#define VMCS_SM_IOCTL_SIZE_USR_HDL\
4810 + _IOR(VMCS_SM_MAGIC_TYPE, VMCS_SM_CMD_SIZE_USR_HANDLE,\
4811 + struct vmcs_sm_ioctl_size)
4812 +#define VMCS_SM_IOCTL_CHK_USR_HDL\
4813 + _IOR(VMCS_SM_MAGIC_TYPE, VMCS_SM_CMD_CHK_USR_HANDLE,\
4814 + struct vmcs_sm_ioctl_chk)
4815 +
4816 +#define VMCS_SM_IOCTL_MAP_USR_HDL\
4817 + _IOR(VMCS_SM_MAGIC_TYPE, VMCS_SM_CMD_MAPPED_USR_HANDLE,\
4818 + struct vmcs_sm_ioctl_map)
4819 +#define VMCS_SM_IOCTL_MAP_USR_ADDRESS\
4820 + _IOR(VMCS_SM_MAGIC_TYPE, VMCS_SM_CMD_MAPPED_USR_ADDRESS,\
4821 + struct vmcs_sm_ioctl_map)
4822 +#define VMCS_SM_IOCTL_MAP_VC_HDL_FR_ADDR\
4823 + _IOR(VMCS_SM_MAGIC_TYPE, VMCS_SM_CMD_MAPPED_VC_HDL_FROM_ADDR,\
4824 + struct vmcs_sm_ioctl_map)
4825 +#define VMCS_SM_IOCTL_MAP_VC_HDL_FR_HDL\
4826 + _IOR(VMCS_SM_MAGIC_TYPE, VMCS_SM_CMD_MAPPED_VC_HDL_FROM_HDL,\
4827 + struct vmcs_sm_ioctl_map)
4828 +#define VMCS_SM_IOCTL_MAP_VC_ADDR_FR_HDL\
4829 + _IOR(VMCS_SM_MAGIC_TYPE, VMCS_SM_CMD_MAPPED_VC_ADDR_FROM_HDL,\
4830 + struct vmcs_sm_ioctl_map)
4831 +
4832 +#define VMCS_SM_IOCTL_VC_WALK_ALLOC\
4833 + _IO(VMCS_SM_MAGIC_TYPE, VMCS_SM_CMD_VC_WALK_ALLOC)
4834 +#define VMCS_SM_IOCTL_HOST_WALK_MAP\
4835 + _IO(VMCS_SM_MAGIC_TYPE, VMCS_SM_CMD_HOST_WALK_MAP)
4836 +#define VMCS_SM_IOCTL_HOST_WALK_PID_ALLOC\
4837 + _IOR(VMCS_SM_MAGIC_TYPE, VMCS_SM_CMD_HOST_WALK_PID_ALLOC,\
4838 + struct vmcs_sm_ioctl_walk)
4839 +#define VMCS_SM_IOCTL_HOST_WALK_PID_MAP\
4840 + _IOR(VMCS_SM_MAGIC_TYPE, VMCS_SM_CMD_HOST_WALK_PID_MAP,\
4841 + struct vmcs_sm_ioctl_walk)
4842 +
4843 +#define VMCS_SM_IOCTL_MEM_IMPORT_DMABUF\
4844 + _IOR(VMCS_SM_MAGIC_TYPE, VMCS_SM_CMD_IMPORT_DMABUF,\
4845 + struct vmcs_sm_ioctl_import_dmabuf)
4846 +
4847 +/* ---- Variable Externs ------------------------------------------------- */
4848 +
4849 +/* ---- Function Prototypes ---------------------------------------------- */
4850 +
4851 +#endif /* __VMCS_SM_IOCTL_H__INCLUDED__ */
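
Putting the pieces together, the expected user-space flow is: open the device node, VMCS_SM_IOCTL_MEM_ALLOC, then mmap() the buffer. Two details in the sketch below are assumptions this header does not pin down: the node name /dev/vcsm (DEVICE_NAME lives in vmcs_sm.c) and the use of the returned handle as the mmap offset, which is how the companion user-space library drives the driver's mmap hook:

#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>
#include <linux/broadcom/vmcs_sm_ioctl.h>

int main(void)
{
	struct vmcs_sm_ioctl_alloc alloc = { 0 };
	void *p;
	int fd;

	fd = open("/dev/vcsm", O_RDWR);	/* assumed node name */
	if (fd < 0)
		return 1;

	alloc.size = 4096;	/* one 4 KiB unit */
	alloc.num = 1;
	alloc.cached = VMCS_SM_CACHE_NONE;
	strncpy(alloc.name, "example", VMCS_SM_RESOURCE_NAME - 1);

	if (ioctl(fd, VMCS_SM_IOCTL_MEM_ALLOC, &alloc) < 0)
		return 1;

	/* Assumption: the user handle doubles as the mmap offset. */
	p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED,
		 fd, alloc.handle);
	if (p == MAP_FAILED)
		return 1;

	memset(p, 0, 4096);
	munmap(p, 4096);
	close(fd);
	return 0;
}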