1 From c9e2d1daa32fd2267d3a61ae3afc2f429746a01f Mon Sep 17 00:00:00 2001
2 From: popcornmix <popcornmix@gmail.com>
3 Date: Tue, 2 Jul 2013 23:42:01 +0100
4 Subject: [PATCH 06/54] bcm2708 vchiq driver
6 Signed-off-by: popcornmix <popcornmix@gmail.com>
8 vchiq: create_pagelist copes with vmalloc memory
10 Signed-off-by: Daniel Stone <daniels@collabora.com>
12 vchiq: fix the shim message release
14 Signed-off-by: Daniel Stone <daniels@collabora.com>
16 vchiq: export additional symbols
18 Signed-off-by: Daniel Stone <daniels@collabora.com>
20 drivers/misc/Kconfig | 1 +
21 drivers/misc/Makefile | 1 +
22 drivers/misc/vc04_services/Kconfig | 9 +
23 drivers/misc/vc04_services/Makefile | 17 +
24 .../interface/vchi/connections/connection.h | 328 ++
25 .../interface/vchi/message_drivers/message.h | 204 ++
26 drivers/misc/vc04_services/interface/vchi/vchi.h | 373 ++
27 .../misc/vc04_services/interface/vchi/vchi_cfg.h | 224 ++
28 .../interface/vchi/vchi_cfg_internal.h | 71 +
29 .../vc04_services/interface/vchi/vchi_common.h | 163 +
30 .../misc/vc04_services/interface/vchi/vchi_mh.h | 42 +
31 .../misc/vc04_services/interface/vchiq_arm/vchiq.h | 40 +
32 .../vc04_services/interface/vchiq_arm/vchiq_2835.h | 42 +
33 .../interface/vchiq_arm/vchiq_2835_arm.c | 561 +++
34 .../vc04_services/interface/vchiq_arm/vchiq_arm.c | 2813 ++++++++++++++
35 .../vc04_services/interface/vchiq_arm/vchiq_arm.h | 212 ++
36 .../interface/vchiq_arm/vchiq_build_info.h | 37 +
37 .../vc04_services/interface/vchiq_arm/vchiq_cfg.h | 60 +
38 .../interface/vchiq_arm/vchiq_connected.c | 119 +
39 .../interface/vchiq_arm/vchiq_connected.h | 50 +
40 .../vc04_services/interface/vchiq_arm/vchiq_core.c | 3824 ++++++++++++++++++++
41 .../vc04_services/interface/vchiq_arm/vchiq_core.h | 706 ++++
42 .../interface/vchiq_arm/vchiq_genversion | 87 +
43 .../vc04_services/interface/vchiq_arm/vchiq_if.h | 188 +
44 .../interface/vchiq_arm/vchiq_ioctl.h | 129 +
45 .../interface/vchiq_arm/vchiq_kern_lib.c | 456 +++
46 .../interface/vchiq_arm/vchiq_memdrv.h | 71 +
47 .../interface/vchiq_arm/vchiq_pagelist.h | 58 +
48 .../vc04_services/interface/vchiq_arm/vchiq_proc.c | 253 ++
49 .../vc04_services/interface/vchiq_arm/vchiq_shim.c | 828 +++++
50 .../vc04_services/interface/vchiq_arm/vchiq_util.c | 151 +
51 .../vc04_services/interface/vchiq_arm/vchiq_util.h | 81 +
52 .../interface/vchiq_arm/vchiq_version.c | 59 +
53 33 files changed, 12258 insertions(+)
54 create mode 100644 drivers/misc/vc04_services/Kconfig
55 create mode 100644 drivers/misc/vc04_services/Makefile
56 create mode 100644 drivers/misc/vc04_services/interface/vchi/connections/connection.h
57 create mode 100644 drivers/misc/vc04_services/interface/vchi/message_drivers/message.h
58 create mode 100644 drivers/misc/vc04_services/interface/vchi/vchi.h
59 create mode 100644 drivers/misc/vc04_services/interface/vchi/vchi_cfg.h
60 create mode 100644 drivers/misc/vc04_services/interface/vchi/vchi_cfg_internal.h
61 create mode 100644 drivers/misc/vc04_services/interface/vchi/vchi_common.h
62 create mode 100644 drivers/misc/vc04_services/interface/vchi/vchi_mh.h
63 create mode 100644 drivers/misc/vc04_services/interface/vchiq_arm/vchiq.h
64 create mode 100644 drivers/misc/vc04_services/interface/vchiq_arm/vchiq_2835.h
65 create mode 100644 drivers/misc/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c
66 create mode 100644 drivers/misc/vc04_services/interface/vchiq_arm/vchiq_arm.c
67 create mode 100644 drivers/misc/vc04_services/interface/vchiq_arm/vchiq_arm.h
68 create mode 100644 drivers/misc/vc04_services/interface/vchiq_arm/vchiq_build_info.h
69 create mode 100644 drivers/misc/vc04_services/interface/vchiq_arm/vchiq_cfg.h
70 create mode 100644 drivers/misc/vc04_services/interface/vchiq_arm/vchiq_connected.c
71 create mode 100644 drivers/misc/vc04_services/interface/vchiq_arm/vchiq_connected.h
72 create mode 100644 drivers/misc/vc04_services/interface/vchiq_arm/vchiq_core.c
73 create mode 100644 drivers/misc/vc04_services/interface/vchiq_arm/vchiq_core.h
74 create mode 100644 drivers/misc/vc04_services/interface/vchiq_arm/vchiq_genversion
75 create mode 100644 drivers/misc/vc04_services/interface/vchiq_arm/vchiq_if.h
76 create mode 100644 drivers/misc/vc04_services/interface/vchiq_arm/vchiq_ioctl.h
77 create mode 100644 drivers/misc/vc04_services/interface/vchiq_arm/vchiq_kern_lib.c
78 create mode 100644 drivers/misc/vc04_services/interface/vchiq_arm/vchiq_memdrv.h
79 create mode 100644 drivers/misc/vc04_services/interface/vchiq_arm/vchiq_pagelist.h
80 create mode 100644 drivers/misc/vc04_services/interface/vchiq_arm/vchiq_proc.c
81 create mode 100644 drivers/misc/vc04_services/interface/vchiq_arm/vchiq_shim.c
82 create mode 100644 drivers/misc/vc04_services/interface/vchiq_arm/vchiq_util.c
83 create mode 100644 drivers/misc/vc04_services/interface/vchiq_arm/vchiq_util.h
84 create mode 100644 drivers/misc/vc04_services/interface/vchiq_arm/vchiq_version.c
86 --- a/drivers/misc/Kconfig
87 +++ b/drivers/misc/Kconfig
88 @@ -524,6 +524,7 @@ source "drivers/misc/carma/Kconfig"
89 source "drivers/misc/altera-stapl/Kconfig"
90 source "drivers/misc/mei/Kconfig"
91 source "drivers/misc/vmw_vmci/Kconfig"
92 +source "drivers/misc/vc04_services/Kconfig"
93 source "drivers/misc/mic/Kconfig"
94 source "drivers/misc/genwqe/Kconfig"
96 --- a/drivers/misc/Makefile
97 +++ b/drivers/misc/Makefile
98 @@ -52,5 +52,6 @@ obj-$(CONFIG_INTEL_MEI) += mei/
99 obj-$(CONFIG_VMWARE_VMCI) += vmw_vmci/
100 obj-$(CONFIG_LATTICE_ECP3_CONFIG) += lattice-ecp3-config.o
101 obj-$(CONFIG_SRAM) += sram.o
102 +obj-y += vc04_services/
104 obj-$(CONFIG_GENWQE) += genwqe/
106 +++ b/drivers/misc/vc04_services/Kconfig
108 +config BCM2708_VCHIQ
109 + tristate "Videocore VCHIQ"
110 + depends on MACH_BCM2708
113 + Kernel to VideoCore communication interface for the
114 + BCM2708 family of products.
115 + Defaults to Y when the Broadcom Videocore services
116 + are included in the build, N otherwise.
118 +++ b/drivers/misc/vc04_services/Makefile
120 +ifeq ($(CONFIG_MACH_BCM2708),y)
122 +obj-$(CONFIG_BCM2708_VCHIQ) += vchiq.o
125 + interface/vchiq_arm/vchiq_core.o \
126 + interface/vchiq_arm/vchiq_arm.o \
127 + interface/vchiq_arm/vchiq_kern_lib.o \
128 + interface/vchiq_arm/vchiq_2835_arm.o \
129 + interface/vchiq_arm/vchiq_proc.o \
130 + interface/vchiq_arm/vchiq_shim.o \
131 + interface/vchiq_arm/vchiq_util.o \
132 + interface/vchiq_arm/vchiq_connected.o \
134 +ccflags-y += -DVCOS_VERIFY_BKPTS=1 -Idrivers/misc/vc04_services -DUSE_VCHIQ_ARM -D__VCCOREVER__=0x04000000
138 +++ b/drivers/misc/vc04_services/interface/vchi/connections/connection.h
141 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
143 + * Redistribution and use in source and binary forms, with or without
144 + * modification, are permitted provided that the following conditions
146 + * 1. Redistributions of source code must retain the above copyright
147 + * notice, this list of conditions, and the following disclaimer,
148 + * without modification.
149 + * 2. Redistributions in binary form must reproduce the above copyright
150 + * notice, this list of conditions and the following disclaimer in the
151 + * documentation and/or other materials provided with the distribution.
152 + * 3. The names of the above-listed copyright holders may not be used
153 + * to endorse or promote products derived from this software without
154 + * specific prior written permission.
156 + * ALTERNATIVELY, this software may be distributed under the terms of the
157 + * GNU General Public License ("GPL") version 2, as published by the Free
158 + * Software Foundation.
160 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
161 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
162 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
163 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
164 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
165 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
166 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
167 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
168 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
169 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
170 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
173 +#ifndef CONNECTION_H_
174 +#define CONNECTION_H_
176 +#include <linux/kernel.h>
177 +#include <linux/types.h>
178 +#include <linux/semaphore.h>
180 +#include "interface/vchi/vchi_cfg_internal.h"
181 +#include "interface/vchi/vchi_common.h"
182 +#include "interface/vchi/message_drivers/message.h"
184 +/******************************************************************************
186 + *****************************************************************************/
188 +// Opaque handle for a connection / service pair
189 +typedef struct opaque_vchi_connection_connected_service_handle_t *VCHI_CONNECTION_SERVICE_HANDLE_T;
191 +// opaque handle to the connection state information
192 +typedef struct opaque_vchi_connection_info_t VCHI_CONNECTION_STATE_T;
194 +typedef struct vchi_connection_t VCHI_CONNECTION_T;
197 +/******************************************************************************
199 + *****************************************************************************/
201 +// Routine to init a connection with a particular low level driver
202 +typedef VCHI_CONNECTION_STATE_T * (*VCHI_CONNECTION_INIT_T)( struct vchi_connection_t * connection,
203 + const VCHI_MESSAGE_DRIVER_T * driver );
205 +// Routine to control CRC enabling at a connection level
206 +typedef int32_t (*VCHI_CONNECTION_CRC_CONTROL_T)( VCHI_CONNECTION_STATE_T *state_handle,
207 + VCHI_CRC_CONTROL_T control );
209 +// Routine to create a service
210 +typedef int32_t (*VCHI_CONNECTION_SERVICE_CONNECT_T)( VCHI_CONNECTION_STATE_T *state_handle,
211 + int32_t service_id,
212 + uint32_t rx_fifo_size,
213 + uint32_t tx_fifo_size,
215 + VCHI_CALLBACK_T callback,
216 + void *callback_param,
218 + int32_t want_unaligned_bulk_rx,
219 + int32_t want_unaligned_bulk_tx,
220 + VCHI_CONNECTION_SERVICE_HANDLE_T *service_handle );
222 +// Routine to close a service
223 +typedef int32_t (*VCHI_CONNECTION_SERVICE_DISCONNECT_T)( VCHI_CONNECTION_SERVICE_HANDLE_T service_handle );
225 +// Routine to queue a message
226 +typedef int32_t (*VCHI_CONNECTION_SERVICE_QUEUE_MESSAGE_T)( VCHI_CONNECTION_SERVICE_HANDLE_T service_handle,
228 + uint32_t data_size,
229 + VCHI_FLAGS_T flags,
230 + void *msg_handle );
232 +// scatter-gather (vector) message queueing
233 +typedef int32_t (*VCHI_CONNECTION_SERVICE_QUEUE_MESSAGEV_T)( VCHI_CONNECTION_SERVICE_HANDLE_T service_handle,
234 + VCHI_MSG_VECTOR_T *vector,
236 + VCHI_FLAGS_T flags,
237 + void *msg_handle );
239 +// Routine to dequeue a message
240 +typedef int32_t (*VCHI_CONNECTION_SERVICE_DEQUEUE_MESSAGE_T)( VCHI_CONNECTION_SERVICE_HANDLE_T service_handle,
242 + uint32_t max_data_size_to_read,
243 + uint32_t *actual_msg_size,
244 + VCHI_FLAGS_T flags );
246 +// Routine to peek at a message
247 +typedef int32_t (*VCHI_CONNECTION_SERVICE_PEEK_MESSAGE_T)( VCHI_CONNECTION_SERVICE_HANDLE_T service_handle,
249 + uint32_t *msg_size,
250 + VCHI_FLAGS_T flags );
252 +// Routine to hold a message
253 +typedef int32_t (*VCHI_CONNECTION_SERVICE_HOLD_MESSAGE_T)( VCHI_CONNECTION_SERVICE_HANDLE_T service_handle,
255 + uint32_t *msg_size,
256 + VCHI_FLAGS_T flags,
257 + void **message_handle );
259 +// Routine to initialise a received message iterator
260 +typedef int32_t (*VCHI_CONNECTION_SERVICE_LOOKAHEAD_MESSAGE_T)( VCHI_CONNECTION_SERVICE_HANDLE_T service_handle,
261 + VCHI_MSG_ITER_T *iter,
262 + VCHI_FLAGS_T flags );
264 +// Routine to release a held message
265 +typedef int32_t (*VCHI_CONNECTION_HELD_MSG_RELEASE_T)( VCHI_CONNECTION_SERVICE_HANDLE_T service_handle,
266 + void *message_handle );
268 +// Routine to get info on a held message
269 +typedef int32_t (*VCHI_CONNECTION_HELD_MSG_INFO_T)( VCHI_CONNECTION_SERVICE_HANDLE_T service_handle,
270 + void *message_handle,
273 + uint32_t *tx_timestamp,
274 + uint32_t *rx_timestamp );
276 +// Routine to check whether the iterator has a next message
277 +typedef int32_t (*VCHI_CONNECTION_MSG_ITER_HAS_NEXT_T)( VCHI_CONNECTION_SERVICE_HANDLE_T service,
278 + const VCHI_MSG_ITER_T *iter );
280 +// Routine to advance the iterator
281 +typedef int32_t (*VCHI_CONNECTION_MSG_ITER_NEXT_T)( VCHI_CONNECTION_SERVICE_HANDLE_T service,
282 + VCHI_MSG_ITER_T *iter,
284 + uint32_t *msg_size );
286 +// Routine to remove the last message returned by the iterator
287 +typedef int32_t (*VCHI_CONNECTION_MSG_ITER_REMOVE_T)( VCHI_CONNECTION_SERVICE_HANDLE_T service,
288 + VCHI_MSG_ITER_T *iter );
290 +// Routine to hold the last message returned by the iterator
291 +typedef int32_t (*VCHI_CONNECTION_MSG_ITER_HOLD_T)( VCHI_CONNECTION_SERVICE_HANDLE_T service,
292 + VCHI_MSG_ITER_T *iter,
293 + void **msg_handle );
295 +// Routine to transmit bulk data
296 +typedef int32_t (*VCHI_CONNECTION_BULK_QUEUE_TRANSMIT_T)( VCHI_CONNECTION_SERVICE_HANDLE_T service_handle,
297 + const void *data_src,
298 + uint32_t data_size,
299 + VCHI_FLAGS_T flags,
300 + void *bulk_handle );
302 +// Routine to receive data
303 +typedef int32_t (*VCHI_CONNECTION_BULK_QUEUE_RECEIVE_T)( VCHI_CONNECTION_SERVICE_HANDLE_T service_handle,
305 + uint32_t data_size,
306 + VCHI_FLAGS_T flags,
307 + void *bulk_handle );
309 +// Routine to report if a server is available
310 +typedef int32_t (*VCHI_CONNECTION_SERVER_PRESENT)( VCHI_CONNECTION_STATE_T *state, int32_t service_id, int32_t peer_flags );
312 +// Routine to report the number of RX slots available
313 +typedef int (*VCHI_CONNECTION_RX_SLOTS_AVAILABLE)( const VCHI_CONNECTION_STATE_T *state );
315 +// Routine to report the RX slot size
316 +typedef uint32_t (*VCHI_CONNECTION_RX_SLOT_SIZE)( const VCHI_CONNECTION_STATE_T *state );
318 +// Callback to indicate that the other side has added a buffer to the rx bulk DMA FIFO
319 +typedef void (*VCHI_CONNECTION_RX_BULK_BUFFER_ADDED)(VCHI_CONNECTION_STATE_T *state,
322 + MESSAGE_TX_CHANNEL_T channel,
323 + uint32_t channel_params,
324 + uint32_t data_length,
325 + uint32_t data_offset);
327 +// Callback to inform a service that a Xon or Xoff message has been received
328 +typedef void (*VCHI_CONNECTION_FLOW_CONTROL)(VCHI_CONNECTION_STATE_T *state, int32_t service_id, int32_t xoff);
330 +// Callback to inform a service that a server available reply message has been received
331 +typedef void (*VCHI_CONNECTION_SERVER_AVAILABLE_REPLY)(VCHI_CONNECTION_STATE_T *state, int32_t service_id, uint32_t flags);
333 +// Callback to indicate that bulk auxiliary messages have arrived
334 +typedef void (*VCHI_CONNECTION_BULK_AUX_RECEIVED)(VCHI_CONNECTION_STATE_T *state);
336 +// Callback to indicate that bulk auxiliary messages have arrived
337 +typedef void (*VCHI_CONNECTION_BULK_AUX_TRANSMITTED)(VCHI_CONNECTION_STATE_T *state, void *handle);
339 +// Callback with all the connection info you require
340 +typedef void (*VCHI_CONNECTION_INFO)(VCHI_CONNECTION_STATE_T *state, uint32_t protocol_version, uint32_t slot_size, uint32_t num_slots, uint32_t min_bulk_size);
342 +// Callback to inform of a disconnect
343 +typedef void (*VCHI_CONNECTION_DISCONNECT)(VCHI_CONNECTION_STATE_T *state, uint32_t flags);
345 +// Callback to inform of a power control request
346 +typedef void (*VCHI_CONNECTION_POWER_CONTROL)(VCHI_CONNECTION_STATE_T *state, MESSAGE_TX_CHANNEL_T channel, int32_t enable);
348 +// allocate memory suitably aligned for this connection
349 +typedef void * (*VCHI_BUFFER_ALLOCATE)(VCHI_CONNECTION_SERVICE_HANDLE_T service_handle, uint32_t * length);
351 +// free memory allocated by buffer_allocate
352 +typedef void (*VCHI_BUFFER_FREE)(VCHI_CONNECTION_SERVICE_HANDLE_T service_handle, void * address);
355 +/******************************************************************************
356 + System driver struct
357 + *****************************************************************************/
359 +struct opaque_vchi_connection_api_t
361 + // Routine to init the connection
362 + VCHI_CONNECTION_INIT_T init;
364 + // Connection-level CRC control
365 + VCHI_CONNECTION_CRC_CONTROL_T crc_control;
367 + // Routine to connect to or create service
368 + VCHI_CONNECTION_SERVICE_CONNECT_T service_connect;
370 + // Routine to disconnect from a service
371 + VCHI_CONNECTION_SERVICE_DISCONNECT_T service_disconnect;
373 + // Routine to queue a message
374 + VCHI_CONNECTION_SERVICE_QUEUE_MESSAGE_T service_queue_msg;
376 + // scatter-gather (vector) message queue
377 + VCHI_CONNECTION_SERVICE_QUEUE_MESSAGEV_T service_queue_msgv;
379 + // Routine to dequeue a message
380 + VCHI_CONNECTION_SERVICE_DEQUEUE_MESSAGE_T service_dequeue_msg;
382 + // Routine to peek at a message
383 + VCHI_CONNECTION_SERVICE_PEEK_MESSAGE_T service_peek_msg;
385 + // Routine to hold a message
386 + VCHI_CONNECTION_SERVICE_HOLD_MESSAGE_T service_hold_msg;
388 + // Routine to initialise a received message iterator
389 + VCHI_CONNECTION_SERVICE_LOOKAHEAD_MESSAGE_T service_look_ahead_msg;
391 + // Routine to release a message
392 + VCHI_CONNECTION_HELD_MSG_RELEASE_T held_msg_release;
394 + // Routine to get information on a held message
395 + VCHI_CONNECTION_HELD_MSG_INFO_T held_msg_info;
397 + // Routine to check for next message on iterator
398 + VCHI_CONNECTION_MSG_ITER_HAS_NEXT_T msg_iter_has_next;
400 + // Routine to get next message on iterator
401 + VCHI_CONNECTION_MSG_ITER_NEXT_T msg_iter_next;
403 + // Routine to remove the last message returned by iterator
404 + VCHI_CONNECTION_MSG_ITER_REMOVE_T msg_iter_remove;
406 + // Routine to hold the last message returned by iterator
407 + VCHI_CONNECTION_MSG_ITER_HOLD_T msg_iter_hold;
409 + // Routine to transmit bulk data
410 + VCHI_CONNECTION_BULK_QUEUE_TRANSMIT_T bulk_queue_transmit;
412 + // Routine to receive data
413 + VCHI_CONNECTION_BULK_QUEUE_RECEIVE_T bulk_queue_receive;
415 + // Routine to report the available servers
416 + VCHI_CONNECTION_SERVER_PRESENT server_present;
418 + // Routine to report the number of RX slots available
419 + VCHI_CONNECTION_RX_SLOTS_AVAILABLE connection_rx_slots_available;
421 + // Routine to report the RX slot size
422 + VCHI_CONNECTION_RX_SLOT_SIZE connection_rx_slot_size;
424 + // Callback to indicate that the other side has added a buffer to the rx bulk DMA FIFO
425 + VCHI_CONNECTION_RX_BULK_BUFFER_ADDED rx_bulk_buffer_added;
427 + // Callback to inform a service that a Xon or Xoff message has been received
428 + VCHI_CONNECTION_FLOW_CONTROL flow_control;
430 + // Callback to inform a service that a server available reply message has been received
431 + VCHI_CONNECTION_SERVER_AVAILABLE_REPLY server_available_reply;
433 + // Callback to indicate that bulk auxiliary messages have arrived
434 + VCHI_CONNECTION_BULK_AUX_RECEIVED bulk_aux_received;
436 + // Callback to indicate that a bulk auxiliary message has been transmitted
437 + VCHI_CONNECTION_BULK_AUX_TRANSMITTED bulk_aux_transmitted;
439 + // Callback to provide information about the connection
440 + VCHI_CONNECTION_INFO connection_info;
442 + // Callback to notify that peer has requested disconnect
443 + VCHI_CONNECTION_DISCONNECT disconnect;
445 + // Callback to notify that peer has requested power change
446 + VCHI_CONNECTION_POWER_CONTROL power_control;
448 + // allocate memory suitably aligned for this connection
449 + VCHI_BUFFER_ALLOCATE buffer_allocate;
451 + // free memory allocated by buffer_allocate
452 + VCHI_BUFFER_FREE buffer_free;
456 +struct vchi_connection_t {
457 + const VCHI_CONNECTION_API_T *api;
458 + VCHI_CONNECTION_STATE_T *state;
459 +#ifdef VCHI_COARSE_LOCKING
460 + struct semaphore sem;
465 +#endif /* CONNECTION_H_ */
467 +/****************************** End of file **********************************/
469 +++ b/drivers/misc/vc04_services/interface/vchi/message_drivers/message.h
472 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
474 + * Redistribution and use in source and binary forms, with or without
475 + * modification, are permitted provided that the following conditions
477 + * 1. Redistributions of source code must retain the above copyright
478 + * notice, this list of conditions, and the following disclaimer,
479 + * without modification.
480 + * 2. Redistributions in binary form must reproduce the above copyright
481 + * notice, this list of conditions and the following disclaimer in the
482 + * documentation and/or other materials provided with the distribution.
483 + * 3. The names of the above-listed copyright holders may not be used
484 + * to endorse or promote products derived from this software without
485 + * specific prior written permission.
487 + * ALTERNATIVELY, this software may be distributed under the terms of the
488 + * GNU General Public License ("GPL") version 2, as published by the Free
489 + * Software Foundation.
491 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
492 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
493 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
494 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
495 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
496 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
497 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
498 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
499 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
500 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
501 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
504 +#ifndef _VCHI_MESSAGE_H_
505 +#define _VCHI_MESSAGE_H_
507 +#include <linux/kernel.h>
508 +#include <linux/types.h>
509 +#include <linux/semaphore.h>
511 +#include "interface/vchi/vchi_cfg_internal.h"
512 +#include "interface/vchi/vchi_common.h"
515 +typedef enum message_event_type {
516 + MESSAGE_EVENT_NONE,
518 + MESSAGE_EVENT_MESSAGE,
519 + MESSAGE_EVENT_SLOT_COMPLETE,
520 + MESSAGE_EVENT_RX_BULK_PAUSED,
521 + MESSAGE_EVENT_RX_BULK_COMPLETE,
522 + MESSAGE_EVENT_TX_COMPLETE,
523 + MESSAGE_EVENT_MSG_DISCARDED
524 +} MESSAGE_EVENT_TYPE_T;
526 +typedef enum vchi_msg_flags
528 + VCHI_MSG_FLAGS_NONE = 0x0,
529 + VCHI_MSG_FLAGS_TERMINATE_DMA = 0x1
532 +typedef enum message_tx_channel
534 + MESSAGE_TX_CHANNEL_MESSAGE = 0,
535 + MESSAGE_TX_CHANNEL_BULK = 1 // drivers may provide multiple bulk channels, from 1 upwards
536 +} MESSAGE_TX_CHANNEL_T;
538 +// Macros used for cycling through bulk channels
539 +#define MESSAGE_TX_CHANNEL_BULK_PREV(c) (MESSAGE_TX_CHANNEL_BULK+((c)-MESSAGE_TX_CHANNEL_BULK+VCHI_MAX_BULK_TX_CHANNELS_PER_CONNECTION-1)%VCHI_MAX_BULK_TX_CHANNELS_PER_CONNECTION)
540 +#define MESSAGE_TX_CHANNEL_BULK_NEXT(c) (MESSAGE_TX_CHANNEL_BULK+((c)-MESSAGE_TX_CHANNEL_BULK+1)%VCHI_MAX_BULK_TX_CHANNELS_PER_CONNECTION)
542 +typedef enum message_rx_channel
544 + MESSAGE_RX_CHANNEL_MESSAGE = 0,
545 + MESSAGE_RX_CHANNEL_BULK = 1 // drivers may provide multiple bulk channels, from 1 upwards
546 +} MESSAGE_RX_CHANNEL_T;
548 +// Message receive slot information
549 +typedef struct rx_msg_slot_info {
551 + struct rx_msg_slot_info *next;
552 + //struct slot_info *prev;
553 +#if !defined VCHI_COARSE_LOCKING
554 + struct semaphore sem;
557 + uint8_t *addr; // base address of slot
558 + uint32_t len; // length of slot in bytes
560 + uint32_t write_ptr; // hardware causes this to advance
561 + uint32_t read_ptr; // this module does the reading
562 + int active; // is this slot in the hardware dma fifo?
563 + uint32_t msgs_parsed; // count how many messages are in this slot
564 + uint32_t msgs_released; // how many messages have been released
565 + void *state; // connection state information
566 + uint8_t ref_count[VCHI_MAX_SERVICES_PER_CONNECTION]; // reference count for slots held by services
567 +} RX_MSG_SLOTINFO_T;
569 +// The message driver no longer needs to know about the fields of RX_BULK_SLOTINFO_T - sort this out.
570 +// In particular, it mustn't use addr and len - they're the client buffer, but the message
571 +// driver will be tasked with sending the aligned core section.
572 +typedef struct rx_bulk_slotinfo_t {
573 + struct rx_bulk_slotinfo_t *next;
575 + struct semaphore *blocking;
581 + // needed for the callback
584 + VCHI_FLAGS_T flags;
585 +} RX_BULK_SLOTINFO_T;
588 +/* ----------------------------------------------------------------------
589 + * each connection driver will have a pool of the following struct.
591 + * the pool will be managed by vchi_qman_*
592 + * this means there will be multiple queues (single linked lists)
593 + * a given struct message_info will be on exactly one of these queues
595 + * -------------------------------------------------------------------- */
596 +typedef struct rx_message_info {
598 + struct message_info *next;
599 + //struct message_info *prev;
603 + RX_MSG_SLOTINFO_T *slot; // points to whichever slot contains this message
604 + uint32_t tx_timestamp;
605 + uint32_t rx_timestamp;
607 +} RX_MESSAGE_INFO_T;
610 + MESSAGE_EVENT_TYPE_T type;
614 + void *addr; // address of message
615 + uint16_t slot_delta; // whether this message indicated slot delta
616 + uint32_t len; // length of message
617 + RX_MSG_SLOTINFO_T *slot; // slot this message is in
618 + int32_t service; // service id this message is destined for
619 + uint32_t tx_timestamp; // timestamp from the header
620 + uint32_t rx_timestamp; // timestamp when we parsed it
623 + // FIXME: cleanup slot reporting...
624 + RX_MSG_SLOTINFO_T *rx_msg;
625 + RX_BULK_SLOTINFO_T *rx_bulk;
627 + MESSAGE_TX_CHANNEL_T tx_channel;
633 +typedef void VCHI_MESSAGE_DRIVER_EVENT_CALLBACK_T( void *state );
636 + VCHI_MESSAGE_DRIVER_EVENT_CALLBACK_T *event_callback;
637 +} VCHI_MESSAGE_DRIVER_OPEN_T;
640 +// handle to this instance of message driver (as returned by ->open)
641 +typedef struct opaque_mhandle_t *VCHI_MDRIVER_HANDLE_T;
643 +struct opaque_vchi_message_driver_t {
644 + VCHI_MDRIVER_HANDLE_T *(*open)( VCHI_MESSAGE_DRIVER_OPEN_T *params, void *state );
645 + int32_t (*suspending)( VCHI_MDRIVER_HANDLE_T *handle );
646 + int32_t (*resumed)( VCHI_MDRIVER_HANDLE_T *handle );
647 + int32_t (*power_control)( VCHI_MDRIVER_HANDLE_T *handle, MESSAGE_TX_CHANNEL_T, int32_t enable );
648 + int32_t (*add_msg_rx_slot)( VCHI_MDRIVER_HANDLE_T *handle, RX_MSG_SLOTINFO_T *slot ); // rx message
649 + int32_t (*add_bulk_rx)( VCHI_MDRIVER_HANDLE_T *handle, void *data, uint32_t len, RX_BULK_SLOTINFO_T *slot ); // rx data (bulk)
650 + int32_t (*send)( VCHI_MDRIVER_HANDLE_T *handle, MESSAGE_TX_CHANNEL_T channel, const void *data, uint32_t len, VCHI_MSG_FLAGS_T flags, void *send_handle ); // tx (message & bulk)
651 + void (*next_event)( VCHI_MDRIVER_HANDLE_T *handle, MESSAGE_EVENT_T *event ); // get the next event from message_driver
652 + int32_t (*enable)( VCHI_MDRIVER_HANDLE_T *handle );
653 + int32_t (*form_message)( VCHI_MDRIVER_HANDLE_T *handle, int32_t service_id, VCHI_MSG_VECTOR_T *vector, uint32_t count, void
654 + *address, uint32_t length_avail, uint32_t max_total_length, int32_t pad_to_fill, int32_t allow_partial );
656 + int32_t (*update_message)( VCHI_MDRIVER_HANDLE_T *handle, void *dest, int16_t *slot_count );
657 + int32_t (*buffer_aligned)( VCHI_MDRIVER_HANDLE_T *handle, int tx, int uncached, const void *address, const uint32_t length );
658 + void * (*allocate_buffer)( VCHI_MDRIVER_HANDLE_T *handle, uint32_t *length );
659 + void (*free_buffer)( VCHI_MDRIVER_HANDLE_T *handle, void *address );
660 + int (*rx_slot_size)( VCHI_MDRIVER_HANDLE_T *handle, int msg_size );
661 + int (*tx_slot_size)( VCHI_MDRIVER_HANDLE_T *handle, int msg_size );
663 + int32_t (*tx_supports_terminate)( const VCHI_MDRIVER_HANDLE_T *handle, MESSAGE_TX_CHANNEL_T channel );
664 + uint32_t (*tx_bulk_chunk_size)( const VCHI_MDRIVER_HANDLE_T *handle, MESSAGE_TX_CHANNEL_T channel );
665 + int (*tx_alignment)( const VCHI_MDRIVER_HANDLE_T *handle, MESSAGE_TX_CHANNEL_T channel );
666 + int (*rx_alignment)( const VCHI_MDRIVER_HANDLE_T *handle, MESSAGE_RX_CHANNEL_T channel );
667 + void (*form_bulk_aux)( VCHI_MDRIVER_HANDLE_T *handle, MESSAGE_TX_CHANNEL_T channel, const void *data, uint32_t len, uint32_t chunk_size, const void **aux_data, int32_t *aux_len );
668 + void (*debug)( VCHI_MDRIVER_HANDLE_T *handle );
672 +#endif // _VCHI_MESSAGE_H_
674 +/****************************** End of file ***********************************/
676 +++ b/drivers/misc/vc04_services/interface/vchi/vchi.h
679 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
681 + * Redistribution and use in source and binary forms, with or without
682 + * modification, are permitted provided that the following conditions
684 + * 1. Redistributions of source code must retain the above copyright
685 + * notice, this list of conditions, and the following disclaimer,
686 + * without modification.
687 + * 2. Redistributions in binary form must reproduce the above copyright
688 + * notice, this list of conditions and the following disclaimer in the
689 + * documentation and/or other materials provided with the distribution.
690 + * 3. The names of the above-listed copyright holders may not be used
691 + * to endorse or promote products derived from this software without
692 + * specific prior written permission.
694 + * ALTERNATIVELY, this software may be distributed under the terms of the
695 + * GNU General Public License ("GPL") version 2, as published by the Free
696 + * Software Foundation.
698 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
699 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
700 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
701 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
702 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
703 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
704 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
705 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
706 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
707 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
708 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
714 +#include "interface/vchi/vchi_cfg.h"
715 +#include "interface/vchi/vchi_common.h"
716 +#include "interface/vchi/connections/connection.h"
717 +#include "vchi_mh.h"
720 +/******************************************************************************
722 + *****************************************************************************/
724 +#define VCHI_BULK_ROUND_UP(x) ((((unsigned long)(x))+VCHI_BULK_ALIGN-1) & ~(VCHI_BULK_ALIGN-1))
725 +#define VCHI_BULK_ROUND_DOWN(x) (((unsigned long)(x)) & ~(VCHI_BULK_ALIGN-1))
726 +#define VCHI_BULK_ALIGN_NBYTES(x) (VCHI_BULK_ALIGNED(x) ? 0 : (VCHI_BULK_ALIGN - ((unsigned long)(x) & (VCHI_BULK_ALIGN-1))))
728 +#ifdef USE_VCHIQ_ARM
729 +#define VCHI_BULK_ALIGNED(x) 1
731 +#define VCHI_BULK_ALIGNED(x) (((unsigned long)(x) & (VCHI_BULK_ALIGN-1)) == 0)
734 +struct vchi_version {
736 + uint32_t version_min;
738 +#define VCHI_VERSION(v_) { v_, v_ }
739 +#define VCHI_VERSION_EX(v_, m_) { v_, m_ }
746 +} VCHI_MSG_VECTOR_TYPE_T;
748 +typedef struct vchi_msg_vector_ex {
750 + VCHI_MSG_VECTOR_TYPE_T type;
756 + VCHI_MEM_HANDLE_T handle;
761 + // an ordinary data pointer
764 + const void *vec_base;
768 + // a nested vector list
771 + struct vchi_msg_vector_ex *vec;
775 +} VCHI_MSG_VECTOR_EX_T;
778 +// Construct an entry in a msg vector for a pointer (p) of length (l)
779 +#define VCHI_VEC_POINTER(p,l) VCHI_VEC_POINTER, { { (VCHI_MEM_HANDLE_T)(p), (l) } }
781 +// Construct an entry in a msg vector for a message handle (h), starting at offset (o) of length (l)
782 +#define VCHI_VEC_HANDLE(h,o,l) VCHI_VEC_HANDLE, { { (h), (o), (l) } }
784 +// Macros to manipulate 'FOURCC' values
785 +#define MAKE_FOURCC(x) ((int32_t)( (x[0] << 24) | (x[1] << 16) | (x[2] << 8) | x[3] ))
786 +#define FOURCC_TO_CHAR(x) (x >> 24) & 0xFF,(x >> 16) & 0xFF,(x >> 8) & 0xFF, x & 0xFF
789 +// Opaque service information
790 +struct opaque_vchi_service_t;
792 +// Descriptor for a held message. Allocated by client, initialised by vchi_msg_hold,
793 +// vchi_msg_iter_hold or vchi_msg_iter_hold_next. Fields are for internal VCHI use only.
796 + struct opaque_vchi_service_t *service;
802 +// structure used to provide the information needed to open a server or a client
804 + struct vchi_version version;
805 + int32_t service_id;
806 + VCHI_CONNECTION_T *connection;
807 + uint32_t rx_fifo_size;
808 + uint32_t tx_fifo_size;
809 + VCHI_CALLBACK_T callback;
810 + void *callback_param;
811 + /* client intends to receive bulk transfers of
812 + odd lengths or into unaligned buffers */
813 + int32_t want_unaligned_bulk_rx;
814 + /* client intends to transmit bulk transfers of
815 + odd lengths or out of unaligned buffers */
816 + int32_t want_unaligned_bulk_tx;
817 + /* client wants to check CRCs on (bulk) xfers.
818 + Only needs to be set at 1 end - will do both directions. */
820 +} SERVICE_CREATION_T;
822 +// Opaque handle for a VCHI instance
823 +typedef struct opaque_vchi_instance_handle_t *VCHI_INSTANCE_T;
825 +// Opaque handle for a server or client
826 +typedef struct opaque_vchi_service_handle_t *VCHI_SERVICE_HANDLE_T;
828 +// Service registration & startup
829 +typedef void (*VCHI_SERVICE_INIT)(VCHI_INSTANCE_T initialise_instance, VCHI_CONNECTION_T **connections, uint32_t num_connections);
831 +typedef struct service_info_tag {
832 + const char * const vll_filename; /* VLL to load to start this service. This is an empty string if VLL is "static" */
833 + VCHI_SERVICE_INIT init; /* Service initialisation function */
834 + void *vll_handle; /* VLL handle; NULL when unloaded or a "static VLL" in build */
837 +/******************************************************************************
838 + Global funcs - implementation is specific to which side you are on (local / remote)
839 + *****************************************************************************/
845 +extern /*@observer@*/ VCHI_CONNECTION_T * vchi_create_connection( const VCHI_CONNECTION_API_T * function_table,
846 + const VCHI_MESSAGE_DRIVER_T * low_level);
849 +// Routine used to initialise the vchi on both local + remote connections
850 +extern int32_t vchi_initialise( VCHI_INSTANCE_T *instance_handle );
852 +extern int32_t vchi_exit( void );
854 +extern int32_t vchi_connect( VCHI_CONNECTION_T **connections,
855 + const uint32_t num_connections,
856 + VCHI_INSTANCE_T instance_handle );
858 +//When this is called, ensure that all services have no data pending.
859 +//Bulk transfers can remain 'queued'
860 +extern int32_t vchi_disconnect( VCHI_INSTANCE_T instance_handle );
862 +// Global control over bulk CRC checking
863 +extern int32_t vchi_crc_control( VCHI_CONNECTION_T *connection,
864 + VCHI_CRC_CONTROL_T control );
867 +extern void * vchi_allocate_buffer(VCHI_SERVICE_HANDLE_T handle, uint32_t *length);
868 +extern void vchi_free_buffer(VCHI_SERVICE_HANDLE_T handle, void *address);
869 +extern uint32_t vchi_current_time(VCHI_INSTANCE_T instance_handle);
872 +/******************************************************************************
874 + *****************************************************************************/
875 +// Routine to create a named service
876 +extern int32_t vchi_service_create( VCHI_INSTANCE_T instance_handle,
877 + SERVICE_CREATION_T *setup,
878 + VCHI_SERVICE_HANDLE_T *handle );
880 +// Routine to destroy a service
881 +extern int32_t vchi_service_destroy( const VCHI_SERVICE_HANDLE_T handle );
883 +// Routine to open a named service
884 +extern int32_t vchi_service_open( VCHI_INSTANCE_T instance_handle,
885 + SERVICE_CREATION_T *setup,
886 + VCHI_SERVICE_HANDLE_T *handle);
888 +extern int32_t vchi_get_peer_version( const VCHI_SERVICE_HANDLE_T handle,
889 + short *peer_version );
891 +// Routine to close a named service
892 +extern int32_t vchi_service_close( const VCHI_SERVICE_HANDLE_T handle );
894 +// Routine to increment ref count on a named service
895 +extern int32_t vchi_service_use( const VCHI_SERVICE_HANDLE_T handle );
897 +// Routine to decrement ref count on a named service
898 +extern int32_t vchi_service_release( const VCHI_SERVICE_HANDLE_T handle );
900 +// Routine to send a message across a service
901 +extern int32_t vchi_msg_queue( VCHI_SERVICE_HANDLE_T handle,
903 + uint32_t data_size,
904 + VCHI_FLAGS_T flags,
905 + void *msg_handle );
907 +// scatter-gather (vector) and send message
908 +int32_t vchi_msg_queuev_ex( VCHI_SERVICE_HANDLE_T handle,
909 + VCHI_MSG_VECTOR_EX_T *vector,
911 + VCHI_FLAGS_T flags,
912 + void *msg_handle );
914 +// legacy scatter-gather (vector) and send message, only handles pointers
915 +int32_t vchi_msg_queuev( VCHI_SERVICE_HANDLE_T handle,
916 + VCHI_MSG_VECTOR_T *vector,
918 + VCHI_FLAGS_T flags,
919 + void *msg_handle );
921 +// Routine to receive a msg from a service
922 +// Dequeue is equivalent to hold, copy into client buffer, release
923 +extern int32_t vchi_msg_dequeue( VCHI_SERVICE_HANDLE_T handle,
925 + uint32_t max_data_size_to_read,
926 + uint32_t *actual_msg_size,
927 + VCHI_FLAGS_T flags );
929 +// Routine to look at a message in place.
930 +// The message is not dequeued, so a subsequent call to peek or dequeue
931 +// will return the same message.
932 +extern int32_t vchi_msg_peek( VCHI_SERVICE_HANDLE_T handle,
934 + uint32_t *msg_size,
935 + VCHI_FLAGS_T flags );
937 +// Routine to remove a message after it has been read in place with peek
938 +// The first message on the queue is dequeued.
939 +extern int32_t vchi_msg_remove( VCHI_SERVICE_HANDLE_T handle );
941 +// Routine to look at a message in place.
942 +// The message is dequeued, so the caller is left holding it; the descriptor is
943 +// filled in and must be released when the user has finished with the message.
944 +extern int32_t vchi_msg_hold( VCHI_SERVICE_HANDLE_T handle,
945 + void **data, // } may be NULL, as info can be
946 + uint32_t *msg_size, // } obtained from HELD_MSG_T
947 + VCHI_FLAGS_T flags,
948 + VCHI_HELD_MSG_T *message_descriptor );
950 +// Initialise an iterator to look through messages in place
951 +extern int32_t vchi_msg_look_ahead( VCHI_SERVICE_HANDLE_T handle,
952 + VCHI_MSG_ITER_T *iter,
953 + VCHI_FLAGS_T flags );
955 +/******************************************************************************
956 + Global service support API - operations on held messages and message iterators
957 + *****************************************************************************/
959 +// Routine to get the address of a held message
960 +extern void *vchi_held_msg_ptr( const VCHI_HELD_MSG_T *message );
962 +// Routine to get the size of a held message
963 +extern int32_t vchi_held_msg_size( const VCHI_HELD_MSG_T *message );
965 +// Routine to get the transmit timestamp as written into the header by the peer
966 +extern uint32_t vchi_held_msg_tx_timestamp( const VCHI_HELD_MSG_T *message );
968 +// Routine to get the reception timestamp, written as we parsed the header
969 +extern uint32_t vchi_held_msg_rx_timestamp( const VCHI_HELD_MSG_T *message );
971 +// Routine to release a held message after it has been processed
972 +extern int32_t vchi_held_msg_release( VCHI_HELD_MSG_T *message );
974 +// Indicates whether the iterator has a next message.
975 +extern int32_t vchi_msg_iter_has_next( const VCHI_MSG_ITER_T *iter );
977 +// Return the pointer and length for the next message and advance the iterator.
978 +extern int32_t vchi_msg_iter_next( VCHI_MSG_ITER_T *iter,
980 + uint32_t *msg_size );
982 +// Remove the last message returned by vchi_msg_iter_next.
983 +// Can only be called once after each call to vchi_msg_iter_next.
984 +extern int32_t vchi_msg_iter_remove( VCHI_MSG_ITER_T *iter );
986 +// Hold the last message returned by vchi_msg_iter_next.
987 +// Can only be called once after each call to vchi_msg_iter_next.
988 +extern int32_t vchi_msg_iter_hold( VCHI_MSG_ITER_T *iter,
989 + VCHI_HELD_MSG_T *message );
991 +// Return information for the next message, and hold it, advancing the iterator.
992 +extern int32_t vchi_msg_iter_hold_next( VCHI_MSG_ITER_T *iter,
993 + void **data, // } may be NULL
994 + uint32_t *msg_size, // }
995 + VCHI_HELD_MSG_T *message );
998 +/******************************************************************************
1000 + *****************************************************************************/
1002 +// Routine to prepare interface for a transfer from the other side
1003 +extern int32_t vchi_bulk_queue_receive( VCHI_SERVICE_HANDLE_T handle,
1005 + uint32_t data_size,
1006 + VCHI_FLAGS_T flags,
1007 + void *transfer_handle );
1010 +// Prepare interface for a transfer from the other side into relocatable memory.
1011 +int32_t vchi_bulk_queue_receive_reloc( const VCHI_SERVICE_HANDLE_T handle,
1012 + VCHI_MEM_HANDLE_T h_dst,
1014 + uint32_t data_size,
1015 + const VCHI_FLAGS_T flags,
1016 + void * const bulk_handle );
1018 +// Routine to queue up data ready for transfer to the other side (once they have signalled they are ready)
1019 +extern int32_t vchi_bulk_queue_transmit( VCHI_SERVICE_HANDLE_T handle,
1020 + const void *data_src,
1021 + uint32_t data_size,
1022 + VCHI_FLAGS_T flags,
1023 + void *transfer_handle );
1026 +/******************************************************************************
1027 + Configuration plumbing
1028 + *****************************************************************************/
1030 +// function prototypes for the different mid layers (the state info gives the different physical connections)
1031 +extern const VCHI_CONNECTION_API_T *single_get_func_table( void );
1032 +//extern const VCHI_CONNECTION_API_T *local_server_get_func_table( void );
1033 +//extern const VCHI_CONNECTION_API_T *local_client_get_func_table( void );
1035 +// declare all message drivers here
1036 +const VCHI_MESSAGE_DRIVER_T *vchi_mphi_message_driver_func_table( void );
1042 +extern int32_t vchi_bulk_queue_transmit_reloc( VCHI_SERVICE_HANDLE_T handle,
1043 + VCHI_MEM_HANDLE_T h_src,
1045 + uint32_t data_size,
1046 + VCHI_FLAGS_T flags,
1047 + void *transfer_handle );
1048 +#endif /* VCHI_H_ */
1050 +/****************************** End of file **********************************/
1052 +++ b/drivers/misc/vc04_services/interface/vchi/vchi_cfg.h
1055 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
1057 + * Redistribution and use in source and binary forms, with or without
1058 + * modification, are permitted provided that the following conditions
1060 + * 1. Redistributions of source code must retain the above copyright
1061 + * notice, this list of conditions, and the following disclaimer,
1062 + * without modification.
1063 + * 2. Redistributions in binary form must reproduce the above copyright
1064 + * notice, this list of conditions and the following disclaimer in the
1065 + * documentation and/or other materials provided with the distribution.
1066 + * 3. The names of the above-listed copyright holders may not be used
1067 + * to endorse or promote products derived from this software without
1068 + * specific prior written permission.
1070 + * ALTERNATIVELY, this software may be distributed under the terms of the
1071 + * GNU General Public License ("GPL") version 2, as published by the Free
1072 + * Software Foundation.
1074 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
1075 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
1076 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
1077 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
1078 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
1079 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
1080 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
1081 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
1082 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
1083 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
1084 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
1087 +#ifndef VCHI_CFG_H_
1088 +#define VCHI_CFG_H_
1090 +/****************************************************************************************
1091 + * Defines in this first section are part of the VCHI API and may be examined by VCHI
1093 + ***************************************************************************************/
1095 +/* Required alignment of base addresses for bulk transfer, if unaligned transfers are not enabled */
1096 +/* Really determined by the message driver, and should be available from a run-time call. */
1097 +#ifndef VCHI_BULK_ALIGN
1098 +# if __VCCOREVER__ >= 0x04000000
1099 +# define VCHI_BULK_ALIGN 32 // Allows for the need to do cache cleans
1101 +# define VCHI_BULK_ALIGN 16
1105 +/* Required length multiple for bulk transfers, if unaligned transfers are not enabled */
1106 +/* May be less than or greater than VCHI_BULK_ALIGN */
1107 +/* Really determined by the message driver, and should be available from a run-time call. */
1108 +#ifndef VCHI_BULK_GRANULARITY
1109 +# if __VCCOREVER__ >= 0x04000000
1110 +# define VCHI_BULK_GRANULARITY 32 // Allows for the need to do cache cleans
1112 +# define VCHI_BULK_GRANULARITY 16
1116 +/* The largest possible message to be queued with vchi_msg_queue. */
1117 +#ifndef VCHI_MAX_MSG_SIZE
1118 +# if defined VCHI_LOCAL_HOST_PORT
1119 +# define VCHI_MAX_MSG_SIZE 16384 // makes file transfers fast, but should they be using bulk?
1121 +# define VCHI_MAX_MSG_SIZE 4096 // NOTE: THIS MUST BE LARGER THAN OR EQUAL TO THE SIZE OF THE KHRONOS MERGE BUFFER!!
1125 +/******************************************************************************************
1126 + * Defines below are system configuration options, and should not be used by VCHI services.
1127 + *****************************************************************************************/
1129 +/* How many connections can we support? A localhost implementation uses 2 connections,
1130 + * 1 for host-app, 1 for VMCS, and these are hooked together by a loopback MPHI VCFW
1132 +#ifndef VCHI_MAX_NUM_CONNECTIONS
1133 +# define VCHI_MAX_NUM_CONNECTIONS 3
1136 +/* How many services can we open per connection? Extending this doesn't cost processing time, just a small
1137 + * amount of static memory. */
1138 +#ifndef VCHI_MAX_SERVICES_PER_CONNECTION
1139 +# define VCHI_MAX_SERVICES_PER_CONNECTION 36
1142 +/* Adjust if using a message driver that supports more logical TX channels */
1143 +#ifndef VCHI_MAX_BULK_TX_CHANNELS_PER_CONNECTION
1144 +# define VCHI_MAX_BULK_TX_CHANNELS_PER_CONNECTION 9 // 1 MPHI + 8 CCP2 logical channels
1147 +/* Adjust if using a message driver that supports more logical RX channels */
1148 +#ifndef VCHI_MAX_BULK_RX_CHANNELS_PER_CONNECTION
1149 +# define VCHI_MAX_BULK_RX_CHANNELS_PER_CONNECTION 1 // 1 MPHI
1152 +/* How many receive slots do we use. This times VCHI_MAX_MSG_SIZE gives the effective
1153 + * receive queue space, less message headers. */
1154 +#ifndef VCHI_NUM_READ_SLOTS
1155 +# if defined(VCHI_LOCAL_HOST_PORT)
1156 +# define VCHI_NUM_READ_SLOTS 4
1158 +# define VCHI_NUM_READ_SLOTS 48
1162 +/* Do we utilise overrun facility for receive message slots? Can aid peer transmit
1163 + * performance. Only define on VideoCore end, talking to host.
1165 +//#define VCHI_MSG_RX_OVERRUN
1167 +/* How many transmit slots do we use. Generally don't need many, as the hardware driver
1168 + * underneath VCHI will usually have its own buffering. */
1169 +#ifndef VCHI_NUM_WRITE_SLOTS
1170 +# define VCHI_NUM_WRITE_SLOTS 4
1173 +/* If a service has held or queued received messages in VCHI_XOFF_THRESHOLD or more slots,
1174 + * then it's taking up too much buffer space, and the peer service will be told to stop
1175 + * transmitting with an XOFF message. For this to be effective, the VCHI_NUM_READ_SLOTS
1176 + * needs to be considerably bigger than VCHI_NUM_WRITE_SLOTS, or the transmit latency
1178 +#ifndef VCHI_XOFF_THRESHOLD
1179 +# define VCHI_XOFF_THRESHOLD (VCHI_NUM_READ_SLOTS / 2)
1182 +/* After we've sent an XOFF, the peer will be told to resume transmission once the local
1183 + * service has dequeued/released enough messages that it's now occupying
1184 + * VCHI_XON_THRESHOLD slots or fewer. */
1185 +#ifndef VCHI_XON_THRESHOLD
1186 +# define VCHI_XON_THRESHOLD (VCHI_NUM_READ_SLOTS / 4)
1189 +/* A size below which a bulk transfer omits the handshake completely and always goes
1190 + * via the message channel, if bulk auxiliary is being sent on that service. (The user
1191 + * can guarantee this by enabling unaligned transmits).
1193 +#ifndef VCHI_MIN_BULK_SIZE
1194 +# define VCHI_MIN_BULK_SIZE ( VCHI_MAX_MSG_SIZE / 2 < 4096 ? VCHI_MAX_MSG_SIZE / 2 : 4096 )
1197 +/* Maximum size of bulk transmission chunks, for each interface type. A trade-off between
1198 + * speed and latency; the smaller the chunk size the better chance of messages and other
1199 + * bulk transmissions getting in when big bulk transfers are happening. Set to 0 to not
1200 + * break transmissions into chunks.
1202 +#ifndef VCHI_MAX_BULK_CHUNK_SIZE_MPHI
1203 +# define VCHI_MAX_BULK_CHUNK_SIZE_MPHI (16 * 1024)
1206 +/* NB Chunked CCP2 transmissions violate the letter of the CCP2 spec by using "JPEG8" mode
1207 + * with multiple-line frames. Only use if the receiver can cope. */
1208 +#ifndef VCHI_MAX_BULK_CHUNK_SIZE_CCP2
1209 +# define VCHI_MAX_BULK_CHUNK_SIZE_CCP2 0
1212 +/* How many TX messages can we have pending in our transmit slots. Once exhausted,
1213 + * vchi_msg_queue will be blocked. */
1214 +#ifndef VCHI_TX_MSG_QUEUE_SIZE
1215 +# define VCHI_TX_MSG_QUEUE_SIZE 256
1218 +/* How many RX messages can we have parsed in the receive slots. Once exhausted, parsing
1219 + * will be suspended until older messages are dequeued/released. */
1220 +#ifndef VCHI_RX_MSG_QUEUE_SIZE
1221 +# define VCHI_RX_MSG_QUEUE_SIZE 256
1224 +/* Really should be able to cope if we run out of received message descriptors, by
1225 + * suspending parsing as the comment above says, but we don't. This sweeps the issue
1226 + * under the carpet. */
1227 +#if VCHI_RX_MSG_QUEUE_SIZE < (VCHI_MAX_MSG_SIZE/16 + 1) * VCHI_NUM_READ_SLOTS
1228 +# undef VCHI_RX_MSG_QUEUE_SIZE
1229 +# define VCHI_RX_MSG_QUEUE_SIZE (VCHI_MAX_MSG_SIZE/16 + 1) * VCHI_NUM_READ_SLOTS
1232 +/* How many bulk transmits can we have pending. Once exhausted, vchi_bulk_queue_transmit
1233 + * will be blocked. */
1234 +#ifndef VCHI_TX_BULK_QUEUE_SIZE
1235 +# define VCHI_TX_BULK_QUEUE_SIZE 64
1238 +/* How many bulk receives can we have pending. Once exhausted, vchi_bulk_queue_receive
1239 + * will be blocked. */
1240 +#ifndef VCHI_RX_BULK_QUEUE_SIZE
1241 +# define VCHI_RX_BULK_QUEUE_SIZE 64
1244 +/* A limit on how many outstanding bulk requests we expect the peer to give us. If
1245 + * the peer asks for more than this, VCHI will fail and assert. The number is determined
1246 + * by the peer's hardware - it's the number of outstanding requests that can be queued
1247 + * on all bulk channels. VC3's MPHI peripheral allows 16. */
1248 +#ifndef VCHI_MAX_PEER_BULK_REQUESTS
1249 +# define VCHI_MAX_PEER_BULK_REQUESTS 32
1252 +/* Define VCHI_CCP2TX_MANUAL_POWER if the host tells us when to turn the CCP2
1253 + * transmitter on and off.
1255 +/*#define VCHI_CCP2TX_MANUAL_POWER*/
1257 +#ifndef VCHI_CCP2TX_MANUAL_POWER
1259 +/* Timeout (in milliseconds) for putting the CCP2TX interface into IDLE state. Set
1260 + * negative for no IDLE.
1262 +# ifndef VCHI_CCP2TX_IDLE_TIMEOUT
1263 +# define VCHI_CCP2TX_IDLE_TIMEOUT 5
1266 +/* Timeout (in milliseconds) for putting the CCP2TX interface into OFF state. Set
1267 + * negative for no OFF.
1269 +# ifndef VCHI_CCP2TX_OFF_TIMEOUT
1270 +# define VCHI_CCP2TX_OFF_TIMEOUT 1000
1273 +#endif /* VCHI_CCP2TX_MANUAL_POWER */
1275 +#endif /* VCHI_CFG_H_ */
1277 +/****************************** End of file **********************************/
1279 +++ b/drivers/misc/vc04_services/interface/vchi/vchi_cfg_internal.h
1282 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
1284 + * Redistribution and use in source and binary forms, with or without
1285 + * modification, are permitted provided that the following conditions
1287 + * 1. Redistributions of source code must retain the above copyright
1288 + * notice, this list of conditions, and the following disclaimer,
1289 + * without modification.
1290 + * 2. Redistributions in binary form must reproduce the above copyright
1291 + * notice, this list of conditions and the following disclaimer in the
1292 + * documentation and/or other materials provided with the distribution.
1293 + * 3. The names of the above-listed copyright holders may not be used
1294 + * to endorse or promote products derived from this software without
1295 + * specific prior written permission.
1297 + * ALTERNATIVELY, this software may be distributed under the terms of the
1298 + * GNU General Public License ("GPL") version 2, as published by the Free
1299 + * Software Foundation.
1301 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
1302 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
1303 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
1304 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
1305 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
1306 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
1307 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
1308 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
1309 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
1310 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
1311 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
1314 +#ifndef VCHI_CFG_INTERNAL_H_
1315 +#define VCHI_CFG_INTERNAL_H_
1317 +/****************************************************************************************
1318 + * Control optimisation attempts.
1319 + ***************************************************************************************/
1321 +// Don't use lots of short-term locks - use great long ones, reducing the overall locks-per-second
1322 +#define VCHI_COARSE_LOCKING
1324 +// Avoid lock then unlock on exit from blocking queue operations (msg tx, bulk rx/tx)
1325 +// (only relevant if VCHI_COARSE_LOCKING)
1326 +#define VCHI_ELIDE_BLOCK_EXIT_LOCK
1328 +// Avoid lock on non-blocking peek
1329 +// (only relevant if VCHI_COARSE_LOCKING)
1330 +#define VCHI_AVOID_PEEK_LOCK
1332 +// Use one slot-handler thread per connection, rather than 1 thread dealing with all connections in rotation.
1333 +#define VCHI_MULTIPLE_HANDLER_THREADS
1335 +// Put free descriptors onto the head of the free queue, rather than the tail, so that we don't thrash
1336 +// our way through the pool of descriptors.
1337 +#define VCHI_PUSH_FREE_DESCRIPTORS_ONTO_HEAD
1339 +// Don't issue a MSG_AVAILABLE callback for every single message. Possibly only safe if VCHI_COARSE_LOCKING.
1340 +#define VCHI_FEWER_MSG_AVAILABLE_CALLBACKS
1342 +// Don't use message descriptors for TX messages that don't need them
1343 +#define VCHI_MINIMISE_TX_MSG_DESCRIPTORS
1345 +// Nano-locks for multiqueue
1346 +//#define VCHI_MQUEUE_NANOLOCKS
1348 +// Lock-free(er) dequeuing
1349 +//#define VCHI_RX_NANOLOCKS
1351 +#endif /*VCHI_CFG_INTERNAL_H_*/
1353 +++ b/drivers/misc/vc04_services/interface/vchi/vchi_common.h
1356 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
1358 + * Redistribution and use in source and binary forms, with or without
1359 + * modification, are permitted provided that the following conditions
1361 + * 1. Redistributions of source code must retain the above copyright
1362 + * notice, this list of conditions, and the following disclaimer,
1363 + * without modification.
1364 + * 2. Redistributions in binary form must reproduce the above copyright
1365 + * notice, this list of conditions and the following disclaimer in the
1366 + * documentation and/or other materials provided with the distribution.
1367 + * 3. The names of the above-listed copyright holders may not be used
1368 + * to endorse or promote products derived from this software without
1369 + * specific prior written permission.
1371 + * ALTERNATIVELY, this software may be distributed under the terms of the
1372 + * GNU General Public License ("GPL") version 2, as published by the Free
1373 + * Software Foundation.
1375 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
1376 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
1377 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
1378 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
1379 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
1380 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
1381 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
1382 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
1383 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
1384 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
1385 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
1388 +#ifndef VCHI_COMMON_H_
1389 +#define VCHI_COMMON_H_
1392 +//flags used when sending messages (must be bitmapped)
1395 + VCHI_FLAGS_NONE = 0x0,
1396 + VCHI_FLAGS_BLOCK_UNTIL_OP_COMPLETE = 0x1, // waits for message to be received, or sent (NB. not the same as being seen on other side)
1397 + VCHI_FLAGS_CALLBACK_WHEN_OP_COMPLETE = 0x2, // run a callback when message sent
1398 + VCHI_FLAGS_BLOCK_UNTIL_QUEUED = 0x4, // return once the transfer is in a queue ready to go
1399 + VCHI_FLAGS_ALLOW_PARTIAL = 0x8,
1400 + VCHI_FLAGS_BLOCK_UNTIL_DATA_READ = 0x10,
1401 + VCHI_FLAGS_CALLBACK_WHEN_DATA_READ = 0x20,
1403 + VCHI_FLAGS_ALIGN_SLOT = 0x000080, // internal use only
1404 + VCHI_FLAGS_BULK_AUX_QUEUED = 0x010000, // internal use only
1405 + VCHI_FLAGS_BULK_AUX_COMPLETE = 0x020000, // internal use only
1406 + VCHI_FLAGS_BULK_DATA_QUEUED = 0x040000, // internal use only
1407 + VCHI_FLAGS_BULK_DATA_COMPLETE = 0x080000, // internal use only
1408 + VCHI_FLAGS_INTERNAL = 0xFF0000
1411 +// constants for vchi_crc_control()
1413 + VCHI_CRC_NOTHING = -1,
1414 + VCHI_CRC_PER_SERVICE = 0,
1415 + VCHI_CRC_EVERYTHING = 1,
1416 +} VCHI_CRC_CONTROL_T;
1418 +//callback reasons when an event occurs on a service
1421 + VCHI_CALLBACK_REASON_MIN,
1423 + //This indicates that there is data available
1424 + //handle is the msg id that was transmitted with the data
1425 + // When a message is received and there was no FULL message available previously, send callback
1426 + // Tasks get kicked by the callback, reset their event and try and read from the fifo until it fails
1427 + VCHI_CALLBACK_MSG_AVAILABLE,
1428 + VCHI_CALLBACK_MSG_SENT,
1429 + VCHI_CALLBACK_MSG_SPACE_AVAILABLE, // XXX not yet implemented
1431 + // This indicates that a transfer from the other side has completed
1432 + VCHI_CALLBACK_BULK_RECEIVED,
1433 + //This indicates that data queued up to be sent has now gone
1434 + //handle is the msg id that was used when sending the data
1435 + VCHI_CALLBACK_BULK_SENT,
1436 + VCHI_CALLBACK_BULK_RX_SPACE_AVAILABLE, // XXX not yet implemented
1437 + VCHI_CALLBACK_BULK_TX_SPACE_AVAILABLE, // XXX not yet implemented
1439 + VCHI_CALLBACK_SERVICE_CLOSED,
1441 + // this side has sent XOFF to peer due to lack of data consumption by service
1442 + // (suggests the service may need to take some recovery action if it has
1443 + // been deliberately holding off consuming data)
1444 + VCHI_CALLBACK_SENT_XOFF,
1445 + VCHI_CALLBACK_SENT_XON,
1447 + // indicates that a bulk transfer has finished reading the source buffer
1448 + VCHI_CALLBACK_BULK_DATA_READ,
1450 + // power notification events (currently host side only)
1451 + VCHI_CALLBACK_PEER_OFF,
1452 + VCHI_CALLBACK_PEER_SUSPENDED,
1453 + VCHI_CALLBACK_PEER_ON,
1454 + VCHI_CALLBACK_PEER_RESUMED,
1455 + VCHI_CALLBACK_FORCED_POWER_OFF,
1457 +#ifdef USE_VCHIQ_ARM
1458 + // some extra notifications provided by vchiq_arm
1459 + VCHI_CALLBACK_SERVICE_OPENED,
1460 + VCHI_CALLBACK_BULK_RECEIVE_ABORTED,
1461 + VCHI_CALLBACK_BULK_TRANSMIT_ABORTED,
1464 + VCHI_CALLBACK_REASON_MAX
1465 +} VCHI_CALLBACK_REASON_T;
1467 +//Callback used by all services / bulk transfers
1468 +typedef void (*VCHI_CALLBACK_T)( void *callback_param, //my service local param
1469 + VCHI_CALLBACK_REASON_T reason,
1470 + void *handle ); //for transmitting msg's only
1475 + * Define vector struct for scatter-gather (vector) operations
1476 + * Vectors can be nested - if a vector element has negative length, then
1477 + * the data pointer is treated as pointing to another vector array, with
1478 + * '-vec_len' elements. Thus to append a header onto an existing vector,
1479 + * you can do this:
1481 + * void foo(const VCHI_MSG_VECTOR_T *v, int n)
1483 + * VCHI_MSG_VECTOR_T nv[2];
1484 + * nv[0].vec_base = my_header;
1485 + * nv[0].vec_len = sizeof my_header;
1486 + * nv[1].vec_base = v;
1487 + * nv[1].vec_len = -n;
1491 +typedef struct vchi_msg_vector {
1492 + const void *vec_base;
1494 +} VCHI_MSG_VECTOR_T;
1496 +// Opaque type for a connection API
1497 +typedef struct opaque_vchi_connection_api_t VCHI_CONNECTION_API_T;
1499 +// Opaque type for a message driver
1500 +typedef struct opaque_vchi_message_driver_t VCHI_MESSAGE_DRIVER_T;
1503 +// Iterator structure for reading ahead through received message queue. Allocated by client,
1504 +// initialised by vchi_msg_look_ahead. Fields are for internal VCHI use only.
1505 +// Iterates over messages in queue at the instant of the call to vchi_msg_look_ahead -
1506 +// will not proceed to messages received since. Behaviour is undefined if an iterator
1507 +// is used again after messages for that service are removed/dequeued by any
1508 +// means other than vchi_msg_iter_... calls on the iterator itself.
1510 + struct opaque_vchi_service_t *service;
1517 +#endif // VCHI_COMMON_H_
1519 +++ b/drivers/misc/vc04_services/interface/vchi/vchi_mh.h
1522 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
1524 + * Redistribution and use in source and binary forms, with or without
1525 + * modification, are permitted provided that the following conditions
1527 + * 1. Redistributions of source code must retain the above copyright
1528 + * notice, this list of conditions, and the following disclaimer,
1529 + * without modification.
1530 + * 2. Redistributions in binary form must reproduce the above copyright
1531 + * notice, this list of conditions and the following disclaimer in the
1532 + * documentation and/or other materials provided with the distribution.
1533 + * 3. The names of the above-listed copyright holders may not be used
1534 + * to endorse or promote products derived from this software without
1535 + * specific prior written permission.
1537 + * ALTERNATIVELY, this software may be distributed under the terms of the
1538 + * GNU General Public License ("GPL") version 2, as published by the Free
1539 + * Software Foundation.
1541 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
1542 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
1543 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
1544 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
1545 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
1546 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
1547 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
1548 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
1549 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
1550 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
1551 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
1557 +#include <linux/types.h>
1559 +typedef int32_t VCHI_MEM_HANDLE_T;
1560 +#define VCHI_MEM_HANDLE_INVALID 0
1564 +++ b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq.h
1567 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
1569 + * Redistribution and use in source and binary forms, with or without
1570 + * modification, are permitted provided that the following conditions
1572 + * 1. Redistributions of source code must retain the above copyright
1573 + * notice, this list of conditions, and the following disclaimer,
1574 + * without modification.
1575 + * 2. Redistributions in binary form must reproduce the above copyright
1576 + * notice, this list of conditions and the following disclaimer in the
1577 + * documentation and/or other materials provided with the distribution.
1578 + * 3. The names of the above-listed copyright holders may not be used
1579 + * to endorse or promote products derived from this software without
1580 + * specific prior written permission.
1582 + * ALTERNATIVELY, this software may be distributed under the terms of the
1583 + * GNU General Public License ("GPL") version 2, as published by the Free
1584 + * Software Foundation.
1586 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
1587 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
1588 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
1589 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
1590 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
1591 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
1592 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
1593 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
1594 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
1595 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
1596 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
1599 +#ifndef VCHIQ_VCHIQ_H
1600 +#define VCHIQ_VCHIQ_H
1602 +#include "vchiq_if.h"
1603 +#include "vchiq_util.h"
1607 +++ b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_2835.h
1610 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
1612 + * Redistribution and use in source and binary forms, with or without
1613 + * modification, are permitted provided that the following conditions
1615 + * 1. Redistributions of source code must retain the above copyright
1616 + * notice, this list of conditions, and the following disclaimer,
1617 + * without modification.
1618 + * 2. Redistributions in binary form must reproduce the above copyright
1619 + * notice, this list of conditions and the following disclaimer in the
1620 + * documentation and/or other materials provided with the distribution.
1621 + * 3. The names of the above-listed copyright holders may not be used
1622 + * to endorse or promote products derived from this software without
1623 + * specific prior written permission.
1625 + * ALTERNATIVELY, this software may be distributed under the terms of the
1626 + * GNU General Public License ("GPL") version 2, as published by the Free
1627 + * Software Foundation.
1629 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
1630 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
1631 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
1632 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
1633 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
1634 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
1635 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
1636 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
1637 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
1638 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
1639 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
1642 +#ifndef VCHIQ_2835_H
1643 +#define VCHIQ_2835_H
1645 +#include "vchiq_pagelist.h"
1647 +#define VCHIQ_PLATFORM_FRAGMENTS_OFFSET_IDX 0
1648 +#define VCHIQ_PLATFORM_FRAGMENTS_COUNT_IDX 1
1650 +#endif /* VCHIQ_2835_H */
1652 +++ b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c
1655 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
1657 + * Redistribution and use in source and binary forms, with or without
1658 + * modification, are permitted provided that the following conditions
1660 + * 1. Redistributions of source code must retain the above copyright
1661 + * notice, this list of conditions, and the following disclaimer,
1662 + * without modification.
1663 + * 2. Redistributions in binary form must reproduce the above copyright
1664 + * notice, this list of conditions and the following disclaimer in the
1665 + * documentation and/or other materials provided with the distribution.
1666 + * 3. The names of the above-listed copyright holders may not be used
1667 + * to endorse or promote products derived from this software without
1668 + * specific prior written permission.
1670 + * ALTERNATIVELY, this software may be distributed under the terms of the
1671 + * GNU General Public License ("GPL") version 2, as published by the Free
1672 + * Software Foundation.
1674 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
1675 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
1676 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
1677 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
1678 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
1679 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
1680 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
1681 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
1682 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
1683 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
1684 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
1687 +#include <linux/kernel.h>
1688 +#include <linux/types.h>
1689 +#include <linux/errno.h>
1690 +#include <linux/interrupt.h>
1691 +#include <linux/irq.h>
1692 +#include <linux/pagemap.h>
1693 +#include <linux/dma-mapping.h>
1694 +#include <linux/version.h>
1695 +#include <linux/io.h>
1696 +#include <linux/uaccess.h>
1697 +#include <asm/pgtable.h>
1699 +#include <mach/irqs.h>
1701 +#include <mach/platform.h>
1702 +#include <mach/vcio.h>
1704 +#define TOTAL_SLOTS (VCHIQ_SLOT_ZERO_SLOTS + 2 * 32)
1706 +#define VCHIQ_DOORBELL_IRQ IRQ_ARM_DOORBELL_0
1707 +#define VCHIQ_ARM_ADDRESS(x) ((void *)__virt_to_bus((unsigned)x))
1709 +#include "vchiq_arm.h"
1710 +#include "vchiq_2835.h"
1711 +#include "vchiq_connected.h"
1713 +#define MAX_FRAGMENTS (VCHIQ_NUM_CURRENT_BULKS * 2)
1715 +typedef struct vchiq_2835_state_struct {
1717 + VCHIQ_ARM_STATE_T arm_state;
1718 +} VCHIQ_2835_ARM_STATE_T;
1720 +static char *g_slot_mem;
1721 +static int g_slot_mem_size;
1722 +dma_addr_t g_slot_phys;
1723 +static FRAGMENTS_T *g_fragments_base;
1724 +static FRAGMENTS_T *g_free_fragments;
1725 +struct semaphore g_free_fragments_sema;
1727 +extern int vchiq_arm_log_level;
1729 +static DEFINE_SEMAPHORE(g_free_fragments_mutex);
1732 +vchiq_doorbell_irq(int irq, void *dev_id);
1735 +create_pagelist(char __user *buf, size_t count, unsigned short type,
1736 + struct task_struct *task, PAGELIST_T ** ppagelist);
1739 +free_pagelist(PAGELIST_T *pagelist, int actual);
1742 +vchiq_platform_init(VCHIQ_STATE_T *state)
1744 + VCHIQ_SLOT_ZERO_T *vchiq_slot_zero;
1745 + int frag_mem_size;
1749 + /* Allocate space for the channels in coherent memory */
1750 + g_slot_mem_size = PAGE_ALIGN(TOTAL_SLOTS * VCHIQ_SLOT_SIZE);
1751 + frag_mem_size = PAGE_ALIGN(sizeof(FRAGMENTS_T) * MAX_FRAGMENTS);
1753 + g_slot_mem = dma_alloc_coherent(NULL, g_slot_mem_size + frag_mem_size,
1754 + &g_slot_phys, GFP_ATOMIC);
1756 + if (!g_slot_mem) {
1757 + vchiq_log_error(vchiq_arm_log_level,
1758 + "Unable to allocate channel memory");
1760 + goto failed_alloc;
1763 + WARN_ON(((int)g_slot_mem & (PAGE_SIZE - 1)) != 0);
1765 + vchiq_slot_zero = vchiq_init_slots(g_slot_mem, g_slot_mem_size);
1766 + if (!vchiq_slot_zero) {
1768 + goto failed_init_slots;
1771 + vchiq_slot_zero->platform_data[VCHIQ_PLATFORM_FRAGMENTS_OFFSET_IDX] =
1772 + (int)g_slot_phys + g_slot_mem_size;
1773 + vchiq_slot_zero->platform_data[VCHIQ_PLATFORM_FRAGMENTS_COUNT_IDX] =
1776 + g_fragments_base = (FRAGMENTS_T *)(g_slot_mem + g_slot_mem_size);
1777 + g_slot_mem_size += frag_mem_size;
1779 + g_free_fragments = g_fragments_base;
1780 + for (i = 0; i < (MAX_FRAGMENTS - 1); i++) {
1781 + *(FRAGMENTS_T **)&g_fragments_base[i] =
1782 + &g_fragments_base[i + 1];
1784 + *(FRAGMENTS_T **)&g_fragments_base[i] = NULL;
1785 + sema_init(&g_free_fragments_sema, MAX_FRAGMENTS);
1787 + if (vchiq_init_state(state, vchiq_slot_zero, 0/*slave*/) !=
1790 + goto failed_vchiq_init;
1793 + err = request_irq(VCHIQ_DOORBELL_IRQ, vchiq_doorbell_irq,
1794 + IRQF_IRQPOLL, "VCHIQ doorbell",
1797 + vchiq_log_error(vchiq_arm_log_level, "%s: failed to register "
1798 + "irq=%d err=%d", __func__,
1799 + VCHIQ_DOORBELL_IRQ, err);
1800 + goto failed_request_irq;
1803 + /* Send the base address of the slots to VideoCore */
1805 + dsb(); /* Ensure all writes have completed */
1807 + bcm_mailbox_write(MBOX_CHAN_VCHIQ, (unsigned int)g_slot_phys);
1809 + vchiq_log_info(vchiq_arm_log_level,
1810 + "vchiq_init - done (slots %x, phys %x)",
1811 + (unsigned int)vchiq_slot_zero, g_slot_phys);
1813 + vchiq_call_connected_callbacks();
1817 +failed_request_irq:
1820 + dma_free_coherent(NULL, g_slot_mem_size, g_slot_mem, g_slot_phys);
1827 +vchiq_platform_exit(VCHIQ_STATE_T *state)
1829 + free_irq(VCHIQ_DOORBELL_IRQ, state);
1830 + dma_free_coherent(NULL, g_slot_mem_size,
1831 + g_slot_mem, g_slot_phys);
1836 +vchiq_platform_init_state(VCHIQ_STATE_T *state)
1838 + VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
1839 + state->platform_state = kzalloc(sizeof(VCHIQ_2835_ARM_STATE_T), GFP_KERNEL);
1840 + ((VCHIQ_2835_ARM_STATE_T*)state->platform_state)->inited = 1;
1841 + status = vchiq_arm_init_state(state, &((VCHIQ_2835_ARM_STATE_T*)state->platform_state)->arm_state);
1842 + if(status != VCHIQ_SUCCESS)
1844 + ((VCHIQ_2835_ARM_STATE_T*)state->platform_state)->inited = 0;
1850 +vchiq_platform_get_arm_state(VCHIQ_STATE_T *state)
1852 + if(!((VCHIQ_2835_ARM_STATE_T*)state->platform_state)->inited)
1856 + return &((VCHIQ_2835_ARM_STATE_T*)state->platform_state)->arm_state;
1860 +remote_event_signal(REMOTE_EVENT_T *event)
1866 + dsb(); /* data barrier operation */
1868 + if (event->armed) {
1869 + /* trigger vc interrupt */
1871 + writel(0, __io_address(ARM_0_BELL2));
1876 +vchiq_copy_from_user(void *dst, const void *src, int size)
1878 + if ((uint32_t)src < TASK_SIZE) {
1879 + return copy_from_user(dst, src, size);
1881 + memcpy(dst, src, size);
1887 +vchiq_prepare_bulk_data(VCHIQ_BULK_T *bulk, VCHI_MEM_HANDLE_T memhandle,
1888 + void *offset, int size, int dir)
1890 + PAGELIST_T *pagelist;
1893 + WARN_ON(memhandle != VCHI_MEM_HANDLE_INVALID);
1895 + ret = create_pagelist((char __user *)offset, size,
1896 + (dir == VCHIQ_BULK_RECEIVE)
1902 + return VCHIQ_ERROR;
1904 + bulk->handle = memhandle;
1905 + bulk->data = VCHIQ_ARM_ADDRESS(pagelist);
1907 + /* Store the pagelist address in remote_data, which isn't used by the
1909 + bulk->remote_data = pagelist;
1911 + return VCHIQ_SUCCESS;
1915 +vchiq_complete_bulk(VCHIQ_BULK_T *bulk)
1917 + if (bulk && bulk->remote_data && bulk->actual)
1918 + free_pagelist((PAGELIST_T *)bulk->remote_data, bulk->actual);
1922 +vchiq_transfer_bulk(VCHIQ_BULK_T *bulk)
1925 + * This should only be called on the master (VideoCore) side, but
1926 + * provide an implementation to avoid the need for ifdefery.
1932 +vchiq_dump_platform_state(void *dump_context)
1936 + len = snprintf(buf, sizeof(buf),
1937 + " Platform: 2835 (VC master)");
1938 + vchiq_dump(dump_context, buf, len + 1);
1942 +vchiq_platform_suspend(VCHIQ_STATE_T *state)
1944 + return VCHIQ_ERROR;
1948 +vchiq_platform_resume(VCHIQ_STATE_T *state)
1950 + return VCHIQ_SUCCESS;
1954 +vchiq_platform_paused(VCHIQ_STATE_T *state)
1959 +vchiq_platform_resumed(VCHIQ_STATE_T *state)
1964 +vchiq_platform_videocore_wanted(VCHIQ_STATE_T* state)
1966 + return 1; // autosuspend not supported - videocore always wanted
1970 +vchiq_platform_use_suspend_timer(void)
1975 +vchiq_dump_platform_use_state(VCHIQ_STATE_T *state)
1977 + vchiq_log_info((vchiq_arm_log_level>=VCHIQ_LOG_INFO),"Suspend timer not in use");
1980 +vchiq_platform_handle_timeout(VCHIQ_STATE_T *state)
1989 +vchiq_doorbell_irq(int irq, void *dev_id)
1991 + VCHIQ_STATE_T *state = dev_id;
1992 + irqreturn_t ret = IRQ_NONE;
1993 + unsigned int status;
1995 + /* Read (and clear) the doorbell */
1996 + status = readl(__io_address(ARM_0_BELL0));
1998 + if (status & 0x4) { /* Was the doorbell rung? */
1999 + remote_event_pollall(state);
2000 + ret = IRQ_HANDLED;
2006 +/* There is a potential problem with partial cache lines (pages?)
2007 +** at the ends of the block when reading. If the CPU accessed anything in
2008 +** the same line (page?) then it may have pulled old data into the cache,
2009 +** obscuring the new data underneath. We can solve this by transferring the
2010 +** partial cache lines separately, and allowing the ARM to copy into the
2013 +** N.B. This implementation plays slightly fast and loose with the Linux
2014 +** driver programming rules, e.g. its use of __virt_to_bus instead of
2015 +** dma_map_single, but it isn't a multi-platform driver and it benefits
2016 +** from increased speed as a result.
2020 +create_pagelist(char __user *buf, size_t count, unsigned short type,
2021 + struct task_struct *task, PAGELIST_T ** ppagelist)
2023 + PAGELIST_T *pagelist;
2024 + struct page **pages;
2025 + struct page *page;
2026 + unsigned long *addrs;
2027 + unsigned int num_pages, offset, i;
2028 + char *addr, *base_addr, *next_addr;
2029 + int run, addridx, actual_pages;
2030 + unsigned long *need_release;
2032 + offset = (unsigned int)buf & (PAGE_SIZE - 1);
2033 + num_pages = (count + offset + PAGE_SIZE - 1) / PAGE_SIZE;
2035 + *ppagelist = NULL;
2037 + /* Allocate enough storage to hold the page pointers and the page
2040 + pagelist = kmalloc(sizeof(PAGELIST_T) +
2041 + (num_pages * sizeof(unsigned long)) +
2042 + sizeof(unsigned long) +
2043 + (num_pages * sizeof(pages[0])),
2046 + vchiq_log_trace(vchiq_arm_log_level,
2047 + "create_pagelist - %x", (unsigned int)pagelist);
2051 + addrs = pagelist->addrs;
2052 + need_release = (unsigned long *)(addrs + num_pages);
2053 + pages = (struct page **)(addrs + num_pages + 1);
2055 + if (is_vmalloc_addr(buf)) {
2056 + for (actual_pages = 0; actual_pages < num_pages; actual_pages++) {
2057 + pages[actual_pages] = vmalloc_to_page(buf + (actual_pages * PAGE_SIZE));
2059 + *need_release = 0; /* do not try and release vmalloc pages */
2061 + down_read(&task->mm->mmap_sem);
2062 + actual_pages = get_user_pages(task, task->mm,
2063 + (unsigned long)buf & ~(PAGE_SIZE - 1),
2065 + (type == PAGELIST_READ) /*Write */ ,
2069 + up_read(&task->mm->mmap_sem);
2071 + if (actual_pages != num_pages) {
2072 + vchiq_log_info(vchiq_arm_log_level,
2073 + "create_pagelist - only %d/%d pages locked",
2077 + /* This is probably due to the process being killed */
2078 + while (actual_pages > 0)
2081 + page_cache_release(pages[actual_pages]);
2084 + if (actual_pages == 0)
2085 + actual_pages = -ENOMEM;
2086 + return actual_pages;
2088 + *need_release = 1; /* release user pages */
2091 + pagelist->length = count;
2092 + pagelist->type = type;
2093 + pagelist->offset = offset;
2095 + /* Group the pages into runs of contiguous pages */
2097 + base_addr = VCHIQ_ARM_ADDRESS(page_address(pages[0]));
2098 + next_addr = base_addr + PAGE_SIZE;
2102 + for (i = 1; i < num_pages; i++) {
2103 + addr = VCHIQ_ARM_ADDRESS(page_address(pages[i]));
2104 + if ((addr == next_addr) && (run < (PAGE_SIZE - 1))) {
2105 + next_addr += PAGE_SIZE;
2108 + addrs[addridx] = (unsigned long)base_addr + run;
2111 + next_addr = addr + PAGE_SIZE;
2116 + addrs[addridx] = (unsigned long)base_addr + run;
2119 + /* Partial cache lines (fragments) require special measures */
2120 + if ((type == PAGELIST_READ) &&
2121 + ((pagelist->offset & (CACHE_LINE_SIZE - 1)) ||
2122 + ((pagelist->offset + pagelist->length) &
2123 + (CACHE_LINE_SIZE - 1)))) {
2124 + FRAGMENTS_T *fragments;
2126 + if (down_interruptible(&g_free_fragments_sema) != 0) {
2131 + WARN_ON(g_free_fragments == NULL);
2133 + down(&g_free_fragments_mutex);
2134 + fragments = (FRAGMENTS_T *) g_free_fragments;
2135 + WARN_ON(fragments == NULL);
2136 + g_free_fragments = *(FRAGMENTS_T **) g_free_fragments;
2137 + up(&g_free_fragments_mutex);
2139 + PAGELIST_READ_WITH_FRAGMENTS + (fragments -
2140 + g_fragments_base);
2143 + for (page = virt_to_page(pagelist);
2144 + page <= virt_to_page(addrs + num_pages - 1); page++) {
2145 + flush_dcache_page(page);
2148 + *ppagelist = pagelist;
2154 +free_pagelist(PAGELIST_T *pagelist, int actual)
2156 + unsigned long *need_release;
2157 + struct page **pages;
2158 + unsigned int num_pages, i;
2160 + vchiq_log_trace(vchiq_arm_log_level,
2161 + "free_pagelist - %x, %d", (unsigned int)pagelist, actual);
2164 + (pagelist->length + pagelist->offset + PAGE_SIZE - 1) /
2167 + need_release = (unsigned long *)(pagelist->addrs + num_pages);
2168 + pages = (struct page **)(pagelist->addrs + num_pages + 1);
2170 + /* Deal with any partial cache lines (fragments) */
2171 + if (pagelist->type >= PAGELIST_READ_WITH_FRAGMENTS) {
2172 + FRAGMENTS_T *fragments = g_fragments_base +
2173 + (pagelist->type - PAGELIST_READ_WITH_FRAGMENTS);
2174 + int head_bytes, tail_bytes;
2175 + head_bytes = (CACHE_LINE_SIZE - pagelist->offset) &
2176 + (CACHE_LINE_SIZE - 1);
2177 + tail_bytes = (pagelist->offset + actual) &
2178 + (CACHE_LINE_SIZE - 1);
2180 + if ((actual >= 0) && (head_bytes != 0)) {
2181 + if (head_bytes > actual)
2182 + head_bytes = actual;
2184 + memcpy((char *)page_address(pages[0]) +
2186 + fragments->headbuf,
2189 + if ((actual >= 0) && (head_bytes < actual) &&
2190 + (tail_bytes != 0)) {
2191 + memcpy((char *)page_address(pages[num_pages - 1]) +
2192 + ((pagelist->offset + actual) &
2193 + (PAGE_SIZE - 1) & ~(CACHE_LINE_SIZE - 1)),
2194 + fragments->tailbuf, tail_bytes);
2197 + down(&g_free_fragments_mutex);
2198 + *(FRAGMENTS_T **) fragments = g_free_fragments;
2199 + g_free_fragments = fragments;
2200 + up(&g_free_fragments_mutex);
2201 + up(&g_free_fragments_sema);
2204 + if (*need_release) {
2205 + for (i = 0; i < num_pages; i++) {
2206 + if (pagelist->type != PAGELIST_WRITE)
2207 + set_page_dirty(pages[i]);
2209 + page_cache_release(pages[i]);
2216 +++ b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_arm.c
2219 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
2221 + * Redistribution and use in source and binary forms, with or without
2222 + * modification, are permitted provided that the following conditions
2224 + * 1. Redistributions of source code must retain the above copyright
2225 + * notice, this list of conditions, and the following disclaimer,
2226 + * without modification.
2227 + * 2. Redistributions in binary form must reproduce the above copyright
2228 + * notice, this list of conditions and the following disclaimer in the
2229 + * documentation and/or other materials provided with the distribution.
2230 + * 3. The names of the above-listed copyright holders may not be used
2231 + * to endorse or promote products derived from this software without
2232 + * specific prior written permission.
2234 + * ALTERNATIVELY, this software may be distributed under the terms of the
2235 + * GNU General Public License ("GPL") version 2, as published by the Free
2236 + * Software Foundation.
2238 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
2239 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
2240 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
2241 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
2242 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
2243 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
2244 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
2245 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
2246 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
2247 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
2248 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
2251 +#include <linux/kernel.h>
2252 +#include <linux/module.h>
2253 +#include <linux/types.h>
2254 +#include <linux/errno.h>
2255 +#include <linux/cdev.h>
2256 +#include <linux/fs.h>
2257 +#include <linux/device.h>
2258 +#include <linux/mm.h>
2259 +#include <linux/highmem.h>
2260 +#include <linux/pagemap.h>
2261 +#include <linux/bug.h>
2262 +#include <linux/semaphore.h>
2263 +#include <linux/list.h>
2264 +#include <linux/proc_fs.h>
2266 +#include "vchiq_core.h"
2267 +#include "vchiq_ioctl.h"
2268 +#include "vchiq_arm.h"
2270 +#define DEVICE_NAME "vchiq"
2272 +/* Override the default prefix, which would be vchiq_arm (from the filename) */
2273 +#undef MODULE_PARAM_PREFIX
2274 +#define MODULE_PARAM_PREFIX DEVICE_NAME "."
2276 +#define VCHIQ_MINOR 0
2278 +/* Some per-instance constants */
2279 +#define MAX_COMPLETIONS 16
2280 +#define MAX_SERVICES 64
2281 +#define MAX_ELEMENTS 8
2282 +#define MSG_QUEUE_SIZE 64
2284 +#define KEEPALIVE_VER 1
2285 +#define KEEPALIVE_VER_MIN KEEPALIVE_VER
2287 +/* Run time control of log level, based on KERN_XXX level. */
2288 +int vchiq_arm_log_level = VCHIQ_LOG_DEFAULT;
2289 +int vchiq_susp_log_level = VCHIQ_LOG_ERROR;
2291 +#define SUSPEND_TIMER_TIMEOUT_MS 100
2292 +#define SUSPEND_RETRY_TIMER_TIMEOUT_MS 1000
2294 +#define VC_SUSPEND_NUM_OFFSET 3 /* number of values before idle which are -ve */
2295 +static const char *const suspend_state_names[] = {
2296 + "VC_SUSPEND_FORCE_CANCELED",
2297 + "VC_SUSPEND_REJECTED",
2298 + "VC_SUSPEND_FAILED",
2299 + "VC_SUSPEND_IDLE",
2300 + "VC_SUSPEND_REQUESTED",
2301 + "VC_SUSPEND_IN_PROGRESS",
2302 + "VC_SUSPEND_SUSPENDED"
2304 +#define VC_RESUME_NUM_OFFSET 1 /* number of values before idle which are -ve */
2305 +static const char *const resume_state_names[] = {
2306 + "VC_RESUME_FAILED",
2308 + "VC_RESUME_REQUESTED",
2309 + "VC_RESUME_IN_PROGRESS",
2310 + "VC_RESUME_RESUMED"
2312 +/* The number of times we allow force suspend to timeout before actually
2313 +** _forcing_ suspend. This is to cater for SW which fails to release vchiq
2314 +** correctly - we don't want to prevent ARM suspend indefinitely in this case.
2316 +#define FORCE_SUSPEND_FAIL_MAX 8
2318 +/* The time in ms allowed for videocore to go idle when force suspend has been
2320 +#define FORCE_SUSPEND_TIMEOUT_MS 200
2323 +static void suspend_timer_callback(unsigned long context);
2324 +static int vchiq_proc_add_instance(VCHIQ_INSTANCE_T instance);
2325 +static void vchiq_proc_remove_instance(VCHIQ_INSTANCE_T instance);
2328 +typedef struct user_service_struct {
2329 + VCHIQ_SERVICE_T *service;
2331 + VCHIQ_INSTANCE_T instance;
2333 + int dequeue_pending;
2334 + int message_available_pos;
2337 + struct semaphore insert_event;
2338 + struct semaphore remove_event;
2339 + VCHIQ_HEADER_T * msg_queue[MSG_QUEUE_SIZE];
2342 +struct bulk_waiter_node {
2343 + struct bulk_waiter bulk_waiter;
2345 + struct list_head list;
2348 +struct vchiq_instance_struct {
2349 + VCHIQ_STATE_T *state;
2350 + VCHIQ_COMPLETION_DATA_T completions[MAX_COMPLETIONS];
2351 + int completion_insert;
2352 + int completion_remove;
2353 + struct semaphore insert_event;
2354 + struct semaphore remove_event;
2355 + struct mutex completion_mutex;
2362 + struct list_head bulk_waiter_list;
2363 + struct mutex bulk_waiter_list_mutex;
2365 + struct proc_dir_entry *proc_entry;
2368 +typedef struct dump_context_struct {
2375 +static struct cdev vchiq_cdev;
2376 +static dev_t vchiq_devid;
2377 +static VCHIQ_STATE_T g_state;
2378 +static struct class *vchiq_class;
2379 +static struct device *vchiq_dev;
2380 +static DEFINE_SPINLOCK(msg_queue_spinlock);
2382 +static const char *const ioctl_names[] = {
2388 + "QUEUE_BULK_TRANSMIT",
2389 + "QUEUE_BULK_RECEIVE",
2390 + "AWAIT_COMPLETION",
2391 + "DEQUEUE_MESSAGE",
2396 + "RELEASE_SERVICE",
2397 + "SET_SERVICE_OPTION",
2401 +vchiq_static_assert((sizeof(ioctl_names)/sizeof(ioctl_names[0])) ==
2402 + (VCHIQ_IOC_MAX + 1));
2405 +dump_phys_mem(void *virt_addr, uint32_t num_bytes);
2407 +/****************************************************************************
2411 +***************************************************************************/
2413 +static VCHIQ_STATUS_T
2414 +add_completion(VCHIQ_INSTANCE_T instance, VCHIQ_REASON_T reason,
2415 + VCHIQ_HEADER_T *header, USER_SERVICE_T *user_service,
2416 + void *bulk_userdata)
2418 + VCHIQ_COMPLETION_DATA_T *completion;
2419 + DEBUG_INITIALISE(g_state.local)
2421 + while (instance->completion_insert ==
2422 + (instance->completion_remove + MAX_COMPLETIONS)) {
2423 + /* Out of space - wait for the client */
2424 + DEBUG_TRACE(SERVICE_CALLBACK_LINE);
2425 + vchiq_log_trace(vchiq_arm_log_level,
2426 + "add_completion - completion queue full");
2427 + DEBUG_COUNT(COMPLETION_QUEUE_FULL_COUNT);
2428 + if (down_interruptible(&instance->remove_event) != 0) {
2429 + vchiq_log_info(vchiq_arm_log_level,
2430 + "service_callback interrupted");
2431 + return VCHIQ_RETRY;
2432 + } else if (instance->closing) {
2433 + vchiq_log_info(vchiq_arm_log_level,
2434 + "service_callback closing");
2435 + return VCHIQ_ERROR;
2437 + DEBUG_TRACE(SERVICE_CALLBACK_LINE);
2441 + &instance->completions[instance->completion_insert &
2442 + (MAX_COMPLETIONS - 1)];
2444 + completion->header = header;
2445 + completion->reason = reason;
2446 + /* N.B. service_userdata is updated while processing AWAIT_COMPLETION */
2447 + completion->service_userdata = user_service->service;
2448 + completion->bulk_userdata = bulk_userdata;
2450 + if (reason == VCHIQ_SERVICE_CLOSED)
2451 + /* Take an extra reference, to be held until
2452 + this CLOSED notification is delivered. */
2453 + lock_service(user_service->service);
2455 + /* A write barrier is needed here to ensure that the entire completion
2456 + record is written out before the insert point. */
2459 + if (reason == VCHIQ_MESSAGE_AVAILABLE)
2460 + user_service->message_available_pos =
2461 + instance->completion_insert;
2462 + instance->completion_insert++;
2464 + up(&instance->insert_event);
2466 + return VCHIQ_SUCCESS;
2469 +/****************************************************************************
2473 +***************************************************************************/
2475 +static VCHIQ_STATUS_T
2476 +service_callback(VCHIQ_REASON_T reason, VCHIQ_HEADER_T *header,
2477 + VCHIQ_SERVICE_HANDLE_T handle, void *bulk_userdata)
2479 + /* How do we ensure the callback goes to the right client?
2480 + ** The service_user data points to a USER_SERVICE_T record containing
2481 + ** the original callback and the user state structure, which contains a
2482 + ** circular buffer for completion records.
2484 + USER_SERVICE_T *user_service;
2485 + VCHIQ_SERVICE_T *service;
2486 + VCHIQ_INSTANCE_T instance;
2487 + DEBUG_INITIALISE(g_state.local)
2489 + DEBUG_TRACE(SERVICE_CALLBACK_LINE);
2491 + service = handle_to_service(handle);
2493 + user_service = (USER_SERVICE_T *)service->base.userdata;
2494 + instance = user_service->instance;
2496 + if (!instance || instance->closing)
2497 + return VCHIQ_SUCCESS;
2499 + vchiq_log_trace(vchiq_arm_log_level,
2500 + "service_callback - service %lx(%d), reason %d, header %lx, "
2501 + "instance %lx, bulk_userdata %lx",
2502 + (unsigned long)user_service,
2503 + service->localport,
2504 + reason, (unsigned long)header,
2505 + (unsigned long)instance, (unsigned long)bulk_userdata);
2507 + if (header && user_service->is_vchi) {
2508 + spin_lock(&msg_queue_spinlock);
2509 + while (user_service->msg_insert ==
2510 + (user_service->msg_remove + MSG_QUEUE_SIZE)) {
2511 + spin_unlock(&msg_queue_spinlock);
2512 + DEBUG_TRACE(SERVICE_CALLBACK_LINE);
2513 + DEBUG_COUNT(MSG_QUEUE_FULL_COUNT);
2514 + vchiq_log_trace(vchiq_arm_log_level,
2515 + "service_callback - msg queue full");
2516 + /* If there is no MESSAGE_AVAILABLE in the completion
2519 + if ((user_service->message_available_pos -
2520 + instance->completion_remove) < 0) {
2521 + VCHIQ_STATUS_T status;
2522 + vchiq_log_info(vchiq_arm_log_level,
2523 + "Inserting extra MESSAGE_AVAILABLE");
2524 + DEBUG_TRACE(SERVICE_CALLBACK_LINE);
2525 + status = add_completion(instance, reason,
2526 + NULL, user_service, bulk_userdata);
2527 + if (status != VCHIQ_SUCCESS) {
2528 + DEBUG_TRACE(SERVICE_CALLBACK_LINE);
2533 + DEBUG_TRACE(SERVICE_CALLBACK_LINE);
2534 + if (down_interruptible(&user_service->remove_event)
2536 + vchiq_log_info(vchiq_arm_log_level,
2537 + "service_callback interrupted");
2538 + DEBUG_TRACE(SERVICE_CALLBACK_LINE);
2539 + return VCHIQ_RETRY;
2540 + } else if (instance->closing) {
2541 + vchiq_log_info(vchiq_arm_log_level,
2542 + "service_callback closing");
2543 + DEBUG_TRACE(SERVICE_CALLBACK_LINE);
2544 + return VCHIQ_ERROR;
2546 + DEBUG_TRACE(SERVICE_CALLBACK_LINE);
2547 + spin_lock(&msg_queue_spinlock);
2550 + user_service->msg_queue[user_service->msg_insert &
2551 + (MSG_QUEUE_SIZE - 1)] = header;
2552 + user_service->msg_insert++;
2553 + spin_unlock(&msg_queue_spinlock);
2555 + up(&user_service->insert_event);
2557 + /* If there is a thread waiting in DEQUEUE_MESSAGE, or if
2558 + ** there is a MESSAGE_AVAILABLE in the completion queue then
2559 + ** bypass the completion queue.
2561 + if (((user_service->message_available_pos -
2562 + instance->completion_remove) >= 0) ||
2563 + user_service->dequeue_pending) {
2564 + DEBUG_TRACE(SERVICE_CALLBACK_LINE);
2565 + user_service->dequeue_pending = 0;
2566 + return VCHIQ_SUCCESS;
2571 + DEBUG_TRACE(SERVICE_CALLBACK_LINE);
2573 + return add_completion(instance, reason, header, user_service,
2577 +/****************************************************************************
2579 +* user_service_free
2581 +***************************************************************************/
2583 +user_service_free(void *userdata)
2588 +/****************************************************************************
2592 +***************************************************************************/
2595 +vchiq_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
2597 + VCHIQ_INSTANCE_T instance = file->private_data;
2598 + VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
2599 + VCHIQ_SERVICE_T *service = NULL;
2602 + DEBUG_INITIALISE(g_state.local)
2604 + vchiq_log_trace(vchiq_arm_log_level,
2605 + "vchiq_ioctl - instance %x, cmd %s, arg %lx",
2606 + (unsigned int)instance,
2607 + ((_IOC_TYPE(cmd) == VCHIQ_IOC_MAGIC) &&
2608 + (_IOC_NR(cmd) <= VCHIQ_IOC_MAX)) ?
2609 + ioctl_names[_IOC_NR(cmd)] : "<invalid>", arg);
2612 + case VCHIQ_IOC_SHUTDOWN:
2613 + if (!instance->connected)
2616 + /* Remove all services */
2618 + while ((service = next_service_by_instance(instance->state,
2619 + instance, &i)) != NULL) {
2620 + status = vchiq_remove_service(service->handle);
2621 + unlock_service(service);
2622 + if (status != VCHIQ_SUCCESS)
2627 + if (status == VCHIQ_SUCCESS) {
2628 + /* Wake the completion thread and ask it to exit */
2629 + instance->closing = 1;
2630 + up(&instance->insert_event);
2635 + case VCHIQ_IOC_CONNECT:
2636 + if (instance->connected) {
2640 + rc = mutex_lock_interruptible(&instance->state->mutex);
2642 + vchiq_log_error(vchiq_arm_log_level,
2643 + "vchiq: connect: could not lock mutex for "
2645 + instance->state->id, rc);
2649 + status = vchiq_connect_internal(instance->state, instance);
2650 + mutex_unlock(&instance->state->mutex);
2652 + if (status == VCHIQ_SUCCESS)
2653 + instance->connected = 1;
2655 + vchiq_log_error(vchiq_arm_log_level,
2656 + "vchiq: could not connect: %d", status);
2659 + case VCHIQ_IOC_CREATE_SERVICE: {
2660 + VCHIQ_CREATE_SERVICE_T args;
2661 + USER_SERVICE_T *user_service = NULL;
2665 + if (copy_from_user
2666 + (&args, (const void __user *)arg,
2667 + sizeof(args)) != 0) {
2672 + user_service = kmalloc(sizeof(USER_SERVICE_T), GFP_KERNEL);
2673 + if (!user_service) {
2678 + if (args.is_open) {
2679 + if (!instance->connected) {
2681 + kfree(user_service);
2684 + srvstate = VCHIQ_SRVSTATE_OPENING;
2687 + instance->connected ?
2688 + VCHIQ_SRVSTATE_LISTENING :
2689 + VCHIQ_SRVSTATE_HIDDEN;
2692 + userdata = args.params.userdata;
2693 + args.params.callback = service_callback;
2694 + args.params.userdata = user_service;
2695 + service = vchiq_add_service_internal(
2697 + &args.params, srvstate,
2698 + instance, user_service_free);
2700 + if (service != NULL) {
2701 + user_service->service = service;
2702 + user_service->userdata = userdata;
2703 + user_service->instance = instance;
2704 + user_service->is_vchi = args.is_vchi;
2705 + user_service->dequeue_pending = 0;
2706 + user_service->message_available_pos =
2707 + instance->completion_remove - 1;
2708 + user_service->msg_insert = 0;
2709 + user_service->msg_remove = 0;
2710 + sema_init(&user_service->insert_event, 0);
2711 + sema_init(&user_service->remove_event, 0);
2713 + if (args.is_open) {
2714 + status = vchiq_open_service_internal
2715 + (service, instance->pid);
2716 + if (status != VCHIQ_SUCCESS) {
2717 + vchiq_remove_service(service->handle);
2719 + ret = (status == VCHIQ_RETRY) ?
2725 + if (copy_to_user((void __user *)
2726 + &(((VCHIQ_CREATE_SERVICE_T __user *)
2728 + (const void *)&service->handle,
2729 + sizeof(service->handle)) != 0) {
2731 + vchiq_remove_service(service->handle);
2737 + kfree(user_service);
2741 + case VCHIQ_IOC_CLOSE_SERVICE: {
2742 + VCHIQ_SERVICE_HANDLE_T handle = (VCHIQ_SERVICE_HANDLE_T)arg;
2744 + service = find_service_for_instance(instance, handle);
2745 + if (service != NULL)
2746 + status = vchiq_close_service(service->handle);
2751 + case VCHIQ_IOC_REMOVE_SERVICE: {
2752 + VCHIQ_SERVICE_HANDLE_T handle = (VCHIQ_SERVICE_HANDLE_T)arg;
2754 + service = find_service_for_instance(instance, handle);
2755 + if (service != NULL)
2756 + status = vchiq_remove_service(service->handle);
2761 + case VCHIQ_IOC_USE_SERVICE:
2762 + case VCHIQ_IOC_RELEASE_SERVICE: {
2763 + VCHIQ_SERVICE_HANDLE_T handle = (VCHIQ_SERVICE_HANDLE_T)arg;
2765 + service = find_service_for_instance(instance, handle);
2766 + if (service != NULL) {
2767 + status = (cmd == VCHIQ_IOC_USE_SERVICE) ?
2768 + vchiq_use_service_internal(service) :
2769 + vchiq_release_service_internal(service);
2770 + if (status != VCHIQ_SUCCESS) {
2771 + vchiq_log_error(vchiq_susp_log_level,
2772 + "%s: cmd %s returned error %d for "
2773 + "service %c%c%c%c:%03d",
2775 + (cmd == VCHIQ_IOC_USE_SERVICE) ?
2776 + "VCHIQ_IOC_USE_SERVICE" :
2777 + "VCHIQ_IOC_RELEASE_SERVICE",
2779 + VCHIQ_FOURCC_AS_4CHARS(
2780 + service->base.fourcc),
2781 + service->client_id);
2788 + case VCHIQ_IOC_QUEUE_MESSAGE: {
2789 + VCHIQ_QUEUE_MESSAGE_T args;
2790 + if (copy_from_user
2791 + (&args, (const void __user *)arg,
2792 + sizeof(args)) != 0) {
2797 + service = find_service_for_instance(instance, args.handle);
2799 + if ((service != NULL) && (args.count <= MAX_ELEMENTS)) {
2800 + /* Copy elements into kernel space */
2801 + VCHIQ_ELEMENT_T elements[MAX_ELEMENTS];
2802 + if (copy_from_user(elements, args.elements,
2803 + args.count * sizeof(VCHIQ_ELEMENT_T)) == 0)
2804 + status = vchiq_queue_message
2806 + elements, args.count);
2814 + case VCHIQ_IOC_QUEUE_BULK_TRANSMIT:
2815 + case VCHIQ_IOC_QUEUE_BULK_RECEIVE: {
2816 + VCHIQ_QUEUE_BULK_TRANSFER_T args;
2817 + struct bulk_waiter_node *waiter = NULL;
2818 + VCHIQ_BULK_DIR_T dir =
2819 + (cmd == VCHIQ_IOC_QUEUE_BULK_TRANSMIT) ?
2820 + VCHIQ_BULK_TRANSMIT : VCHIQ_BULK_RECEIVE;
2822 + if (copy_from_user
2823 + (&args, (const void __user *)arg,
2824 + sizeof(args)) != 0) {
2829 + service = find_service_for_instance(instance, args.handle);
2835 + if (args.mode == VCHIQ_BULK_MODE_BLOCKING) {
2836 + waiter = kzalloc(sizeof(struct bulk_waiter_node),
2842 + args.userdata = &waiter->bulk_waiter;
2843 + } else if (args.mode == VCHIQ_BULK_MODE_WAITING) {
2844 + struct list_head *pos;
2845 + mutex_lock(&instance->bulk_waiter_list_mutex);
2846 + list_for_each(pos, &instance->bulk_waiter_list) {
2847 + if (list_entry(pos, struct bulk_waiter_node,
2848 + list)->pid == current->pid) {
2849 + waiter = list_entry(pos,
2850 + struct bulk_waiter_node,
2857 + mutex_unlock(&instance->bulk_waiter_list_mutex);
2859 + vchiq_log_error(vchiq_arm_log_level,
2860 + "no bulk_waiter found for pid %d",
2865 + vchiq_log_info(vchiq_arm_log_level,
2866 + "found bulk_waiter %x for pid %d",
2867 + (unsigned int)waiter, current->pid);
2868 + args.userdata = &waiter->bulk_waiter;
2870 + status = vchiq_bulk_transfer
2872 + VCHI_MEM_HANDLE_INVALID,
2873 + args.data, args.size,
2874 + args.userdata, args.mode,
2878 + if ((status != VCHIQ_RETRY) || fatal_signal_pending(current) ||
2879 + !waiter->bulk_waiter.bulk) {
2880 + if (waiter->bulk_waiter.bulk) {
2881 + /* Cancel the signal when the transfer
2883 + spin_lock(&bulk_waiter_spinlock);
2884 + waiter->bulk_waiter.bulk->userdata = NULL;
2885 + spin_unlock(&bulk_waiter_spinlock);
2889 + const VCHIQ_BULK_MODE_T mode_waiting =
2890 + VCHIQ_BULK_MODE_WAITING;
2891 + waiter->pid = current->pid;
2892 + mutex_lock(&instance->bulk_waiter_list_mutex);
2893 + list_add(&waiter->list, &instance->bulk_waiter_list);
2894 + mutex_unlock(&instance->bulk_waiter_list_mutex);
2895 + vchiq_log_info(vchiq_arm_log_level,
2896 + "saved bulk_waiter %x for pid %d",
2897 + (unsigned int)waiter, current->pid);
2899 + if (copy_to_user((void __user *)
2900 + &(((VCHIQ_QUEUE_BULK_TRANSFER_T __user *)
2902 + (const void *)&mode_waiting,
2903 + sizeof(mode_waiting)) != 0)
2908 + case VCHIQ_IOC_AWAIT_COMPLETION: {
2909 + VCHIQ_AWAIT_COMPLETION_T args;
2911 + DEBUG_TRACE(AWAIT_COMPLETION_LINE);
2912 + if (!instance->connected) {
2917 + if (copy_from_user(&args, (const void __user *)arg,
2918 + sizeof(args)) != 0) {
2923 + mutex_lock(&instance->completion_mutex);
2925 + DEBUG_TRACE(AWAIT_COMPLETION_LINE);
2926 + while ((instance->completion_remove ==
2927 + instance->completion_insert)
2928 + && !instance->closing) {
2930 + DEBUG_TRACE(AWAIT_COMPLETION_LINE);
2931 + mutex_unlock(&instance->completion_mutex);
2932 + rc = down_interruptible(&instance->insert_event);
2933 + mutex_lock(&instance->completion_mutex);
2935 + DEBUG_TRACE(AWAIT_COMPLETION_LINE);
2936 + vchiq_log_info(vchiq_arm_log_level,
2937 + "AWAIT_COMPLETION interrupted");
2942 + DEBUG_TRACE(AWAIT_COMPLETION_LINE);
2944 + /* A read memory barrier is needed to stop prefetch of a stale
2945 + ** completion record
2950 + int msgbufcount = args.msgbufcount;
2951 + for (ret = 0; ret < args.count; ret++) {
2952 + VCHIQ_COMPLETION_DATA_T *completion;
2953 + VCHIQ_SERVICE_T *service;
2954 + USER_SERVICE_T *user_service;
2955 + VCHIQ_HEADER_T *header;
2956 + if (instance->completion_remove ==
2957 + instance->completion_insert)
2959 + completion = &instance->completions[
2960 + instance->completion_remove &
2961 + (MAX_COMPLETIONS - 1)];
2963 + service = completion->service_userdata;
2964 + user_service = service->base.userdata;
2965 + completion->service_userdata =
2966 + user_service->userdata;
2968 + header = completion->header;
2970 + void __user *msgbuf;
2973 + msglen = header->size +
2974 + sizeof(VCHIQ_HEADER_T);
2975 + /* This must be a VCHIQ-style service */
2976 + if (args.msgbufsize < msglen) {
2978 + vchiq_arm_log_level,
2979 + "header %x: msgbufsize"
2980 + " %x < msglen %x",
2981 + (unsigned int)header,
2984 + WARN(1, "invalid message "
2990 + if (msgbufcount <= 0)
2991 + /* Stall here for lack of a
2992 + ** buffer for the message. */
2994 + /* Get the pointer from user space */
2996 + if (copy_from_user(&msgbuf,
2997 + (const void __user *)
2998 + &args.msgbufs[msgbufcount],
2999 + sizeof(msgbuf)) != 0) {
3005 + /* Copy the message to user space */
3006 + if (copy_to_user(msgbuf, header,
3013 + /* Now it has been copied, the message
3014 + ** can be released. */
3015 + vchiq_release_message(service->handle,
3018 + /* The completion must point to the
3020 + completion->header = msgbuf;
3023 + if (completion->reason ==
3024 + VCHIQ_SERVICE_CLOSED)
3025 + unlock_service(service);
3027 + if (copy_to_user((void __user *)(
3028 + (size_t)args.buf +
3029 + ret * sizeof(VCHIQ_COMPLETION_DATA_T)),
3031 + sizeof(VCHIQ_COMPLETION_DATA_T)) != 0) {
3037 + instance->completion_remove++;
3040 + if (msgbufcount != args.msgbufcount) {
3041 + if (copy_to_user((void __user *)
3042 + &((VCHIQ_AWAIT_COMPLETION_T *)arg)->
3045 + sizeof(msgbufcount)) != 0) {
3052 + up(&instance->remove_event);
3053 + mutex_unlock(&instance->completion_mutex);
3054 + DEBUG_TRACE(AWAIT_COMPLETION_LINE);
3057 + case VCHIQ_IOC_DEQUEUE_MESSAGE: {
3058 + VCHIQ_DEQUEUE_MESSAGE_T args;
3059 + USER_SERVICE_T *user_service;
3060 + VCHIQ_HEADER_T *header;
3062 + DEBUG_TRACE(DEQUEUE_MESSAGE_LINE);
3063 + if (copy_from_user
3064 + (&args, (const void __user *)arg,
3065 + sizeof(args)) != 0) {
3069 + service = find_service_for_instance(instance, args.handle);
3074 + user_service = (USER_SERVICE_T *)service->base.userdata;
3075 + if (user_service->is_vchi == 0) {
3080 + spin_lock(&msg_queue_spinlock);
3081 + if (user_service->msg_remove == user_service->msg_insert) {
3082 + if (!args.blocking) {
3083 + spin_unlock(&msg_queue_spinlock);
3084 + DEBUG_TRACE(DEQUEUE_MESSAGE_LINE);
3085 + ret = -EWOULDBLOCK;
3088 + user_service->dequeue_pending = 1;
3090 + spin_unlock(&msg_queue_spinlock);
3091 + DEBUG_TRACE(DEQUEUE_MESSAGE_LINE);
3092 + if (down_interruptible(
3093 + &user_service->insert_event) != 0) {
3094 + vchiq_log_info(vchiq_arm_log_level,
3095 + "DEQUEUE_MESSAGE interrupted");
3099 + spin_lock(&msg_queue_spinlock);
3100 + } while (user_service->msg_remove ==
3101 + user_service->msg_insert);
3107 + BUG_ON((int)(user_service->msg_insert -
3108 + user_service->msg_remove) < 0);
3110 + header = user_service->msg_queue[user_service->msg_remove &
3111 + (MSG_QUEUE_SIZE - 1)];
3112 + user_service->msg_remove++;
3113 + spin_unlock(&msg_queue_spinlock);
3115 + up(&user_service->remove_event);
3116 + if (header == NULL)
3118 + else if (header->size <= args.bufsize) {
3119 + /* Copy to user space if msgbuf is not NULL */
3120 + if ((args.buf == NULL) ||
3121 + (copy_to_user((void __user *)args.buf,
3123 + header->size) == 0)) {
3124 + ret = header->size;
3125 + vchiq_release_message(
3131 + vchiq_log_error(vchiq_arm_log_level,
3132 + "header %x: bufsize %x < size %x",
3133 + (unsigned int)header, args.bufsize,
3135 + WARN(1, "invalid size\n");
3138 + DEBUG_TRACE(DEQUEUE_MESSAGE_LINE);
3141 + case VCHIQ_IOC_GET_CLIENT_ID: {
3142 + VCHIQ_SERVICE_HANDLE_T handle = (VCHIQ_SERVICE_HANDLE_T)arg;
3144 + ret = vchiq_get_client_id(handle);
3147 + case VCHIQ_IOC_GET_CONFIG: {
3148 + VCHIQ_GET_CONFIG_T args;
3149 + VCHIQ_CONFIG_T config;
3151 + if (copy_from_user(&args, (const void __user *)arg,
3152 + sizeof(args)) != 0) {
3156 + if (args.config_size > sizeof(config)) {
3160 + status = vchiq_get_config(instance, args.config_size, &config);
3161 + if (status == VCHIQ_SUCCESS) {
3162 + if (copy_to_user((void __user *)args.pconfig,
3163 + &config, args.config_size) != 0) {
3170 + case VCHIQ_IOC_SET_SERVICE_OPTION: {
3171 + VCHIQ_SET_SERVICE_OPTION_T args;
3173 + if (copy_from_user(
3174 + &args, (const void __user *)arg,
3175 + sizeof(args)) != 0) {
3180 + service = find_service_for_instance(instance, args.handle);
3186 + status = vchiq_set_service_option(
3187 + args.handle, args.option, args.value);
3190 + case VCHIQ_IOC_DUMP_PHYS_MEM: {
3191 + VCHIQ_DUMP_MEM_T args;
3193 + if (copy_from_user
3194 + (&args, (const void __user *)arg,
3195 + sizeof(args)) != 0) {
3199 + dump_phys_mem(args.virt_addr, args.num_bytes);
3208 + unlock_service(service);
3211 + if (status == VCHIQ_ERROR)
3213 + else if (status == VCHIQ_RETRY)
3217 + if ((status == VCHIQ_SUCCESS) && (ret < 0) && (ret != -EINTR) &&
3218 + (ret != -EWOULDBLOCK))
3219 + vchiq_log_info(vchiq_arm_log_level,
3220 + " ioctl instance %lx, cmd %s -> status %d, %ld",
3221 + (unsigned long)instance,
3222 + (_IOC_NR(cmd) <= VCHIQ_IOC_MAX) ?
3223 + ioctl_names[_IOC_NR(cmd)] :
3227 + vchiq_log_trace(vchiq_arm_log_level,
3228 + " ioctl instance %lx, cmd %s -> status %d, %ld",
3229 + (unsigned long)instance,
3230 + (_IOC_NR(cmd) <= VCHIQ_IOC_MAX) ?
3231 + ioctl_names[_IOC_NR(cmd)] :
3238 +/****************************************************************************
3242 +***************************************************************************/
3245 +vchiq_open(struct inode *inode, struct file *file)
3247 + int dev = iminor(inode) & 0x0f;
3248 + vchiq_log_info(vchiq_arm_log_level, "vchiq_open");
3250 + case VCHIQ_MINOR: {
3252 + VCHIQ_STATE_T *state = vchiq_get_state();
3253 + VCHIQ_INSTANCE_T instance;
3256 + vchiq_log_error(vchiq_arm_log_level,
3257 + "vchiq has no connection to VideoCore");
3261 + instance = kzalloc(sizeof(*instance), GFP_KERNEL);
3265 + instance->state = state;
3266 + instance->pid = current->tgid;
3268 + ret = vchiq_proc_add_instance(instance);
3274 + sema_init(&instance->insert_event, 0);
3275 + sema_init(&instance->remove_event, 0);
3276 + mutex_init(&instance->completion_mutex);
3277 + mutex_init(&instance->bulk_waiter_list_mutex);
3278 + INIT_LIST_HEAD(&instance->bulk_waiter_list);
3280 + file->private_data = instance;
3284 + vchiq_log_error(vchiq_arm_log_level,
3285 + "Unknown minor device: %d", dev);
3292 +/****************************************************************************
3296 +***************************************************************************/
3299 +vchiq_release(struct inode *inode, struct file *file)
3301 + int dev = iminor(inode) & 0x0f;
3304 + case VCHIQ_MINOR: {
3305 + VCHIQ_INSTANCE_T instance = file->private_data;
3306 + VCHIQ_STATE_T *state = vchiq_get_state();
3307 + VCHIQ_SERVICE_T *service;
3310 + vchiq_log_info(vchiq_arm_log_level,
3311 + "vchiq_release: instance=%lx",
3312 + (unsigned long)instance);
3319 + /* Ensure videocore is awake to allow termination. */
3320 + vchiq_use_internal(instance->state, NULL,
3323 + mutex_lock(&instance->completion_mutex);
3325 + /* Wake the completion thread and ask it to exit */
3326 + instance->closing = 1;
3327 + up(&instance->insert_event);
3329 + mutex_unlock(&instance->completion_mutex);
3331 + /* Wake the slot handler if the completion queue is full. */
3332 + up(&instance->remove_event);
3334 + /* Mark all services for termination... */
3336 + while ((service = next_service_by_instance(state, instance,
3338 + USER_SERVICE_T *user_service = service->base.userdata;
3340 + /* Wake the slot handler if the msg queue is full. */
3341 + up(&user_service->remove_event);
3343 + vchiq_terminate_service_internal(service);
3344 + unlock_service(service);
3347 + /* ...and wait for them to die */
3349 + while ((service = next_service_by_instance(state, instance, &i))
3351 + USER_SERVICE_T *user_service = service->base.userdata;
3353 + down(&service->remove_event);
3355 + BUG_ON(service->srvstate != VCHIQ_SRVSTATE_FREE);
3357 + spin_lock(&msg_queue_spinlock);
3359 + while (user_service->msg_remove !=
3360 + user_service->msg_insert) {
3361 + VCHIQ_HEADER_T *header = user_service->
3362 + msg_queue[user_service->msg_remove &
3363 + (MSG_QUEUE_SIZE - 1)];
3364 + user_service->msg_remove++;
3365 + spin_unlock(&msg_queue_spinlock);
3368 + vchiq_release_message(
3371 + spin_lock(&msg_queue_spinlock);
3374 + spin_unlock(&msg_queue_spinlock);
3376 + unlock_service(service);
3379 + /* Release any closed services */
3380 + while (instance->completion_remove !=
3381 + instance->completion_insert) {
3382 + VCHIQ_COMPLETION_DATA_T *completion;
3383 + VCHIQ_SERVICE_T *service;
3384 + completion = &instance->completions[
3385 + instance->completion_remove &
3386 + (MAX_COMPLETIONS - 1)];
3387 + service = completion->service_userdata;
3388 + if (completion->reason == VCHIQ_SERVICE_CLOSED)
3389 + unlock_service(service);
3390 + instance->completion_remove++;
3393 + /* Release the PEER service count. */
3394 + vchiq_release_internal(instance->state, NULL);
3397 + struct list_head *pos, *next;
3398 + list_for_each_safe(pos, next,
3399 + &instance->bulk_waiter_list) {
3400 + struct bulk_waiter_node *waiter;
3401 + waiter = list_entry(pos,
3402 + struct bulk_waiter_node,
3405 + vchiq_log_info(vchiq_arm_log_level,
3406 + "bulk_waiter - cleaned up %x "
3408 + (unsigned int)waiter, waiter->pid);
3413 + vchiq_proc_remove_instance(instance);
3416 + file->private_data = NULL;
3420 + vchiq_log_error(vchiq_arm_log_level,
3421 + "Unknown minor device: %d", dev);
3429 +/****************************************************************************
3433 +***************************************************************************/
3436 +vchiq_dump(void *dump_context, const char *str, int len)
3438 + DUMP_CONTEXT_T *context = (DUMP_CONTEXT_T *)dump_context;
3440 + if (context->actual < context->space) {
3442 + if (context->offset > 0) {
3443 + int skip_bytes = min(len, (int)context->offset);
3444 + str += skip_bytes;
3445 + len -= skip_bytes;
3446 + context->offset -= skip_bytes;
3447 + if (context->offset > 0)
3450 + copy_bytes = min(len, (int)(context->space - context->actual));
3451 + if (copy_bytes == 0)
3453 + if (copy_to_user(context->buf + context->actual, str,
3455 + context->actual = -EFAULT;
3456 + context->actual += copy_bytes;
3457 + len -= copy_bytes;
3459 + /* If the terminating NUL is included in the length, then it
3460 + ** marks the end of a line and should be replaced with a
3461 + ** carriage return. */
3462 + if ((len == 0) && (str[copy_bytes - 1] == '\0')) {
3464 + if (copy_to_user(context->buf + context->actual - 1,
3466 + context->actual = -EFAULT;
3471 +/****************************************************************************
3473 +* vchiq_dump_platform_instance_state
3475 +***************************************************************************/
3478 +vchiq_dump_platform_instances(void *dump_context)
3480 + VCHIQ_STATE_T *state = vchiq_get_state();
3485 + /* There is no list of instances, so instead scan all services,
3486 + marking those that have been dumped. */
3488 + for (i = 0; i < state->unused_service; i++) {
3489 + VCHIQ_SERVICE_T *service = state->services[i];
3490 + VCHIQ_INSTANCE_T instance;
3492 + if (service && (service->base.callback == service_callback)) {
3493 + instance = service->instance;
3495 + instance->mark = 0;
3499 + for (i = 0; i < state->unused_service; i++) {
3500 + VCHIQ_SERVICE_T *service = state->services[i];
3501 + VCHIQ_INSTANCE_T instance;
3503 + if (service && (service->base.callback == service_callback)) {
3504 + instance = service->instance;
3505 + if (instance && !instance->mark) {
3506 + len = snprintf(buf, sizeof(buf),
3507 + "Instance %x: pid %d,%s completions "
3509 + (unsigned int)instance, instance->pid,
3510 + instance->connected ? " connected, " :
3512 + instance->completion_insert -
3513 + instance->completion_remove,
3516 + vchiq_dump(dump_context, buf, len + 1);
3518 + instance->mark = 1;
3524 +/****************************************************************************
3526 +* vchiq_dump_platform_service_state
3528 +***************************************************************************/
3531 +vchiq_dump_platform_service_state(void *dump_context, VCHIQ_SERVICE_T *service)
3533 + USER_SERVICE_T *user_service = (USER_SERVICE_T *)service->base.userdata;
3537 + len = snprintf(buf, sizeof(buf), " instance %x",
3538 + (unsigned int)service->instance);
3540 + if ((service->base.callback == service_callback) &&
3541 + user_service->is_vchi) {
3542 + len += snprintf(buf + len, sizeof(buf) - len,
3543 + ", %d/%d messages",
3544 + user_service->msg_insert - user_service->msg_remove,
3547 + if (user_service->dequeue_pending)
3548 + len += snprintf(buf + len, sizeof(buf) - len,
3549 + " (dequeue pending)");
3552 + vchiq_dump(dump_context, buf, len + 1);
3555 +/****************************************************************************
3559 +***************************************************************************/
3562 +dump_phys_mem(void *virt_addr, uint32_t num_bytes)
3565 + uint8_t *end_virt_addr = virt_addr + num_bytes;
3571 + struct page *page;
3572 + struct page **pages;
3573 + uint8_t *kmapped_virt_ptr;
3575 + /* Align virtAddr and endVirtAddr to 16 byte boundaries. */
3577 + virt_addr = (void *)((unsigned long)virt_addr & ~0x0fuL);
3578 + end_virt_addr = (void *)(((unsigned long)end_virt_addr + 15uL) &
3581 + offset = (int)(long)virt_addr & (PAGE_SIZE - 1);
3582 + end_offset = (int)(long)end_virt_addr & (PAGE_SIZE - 1);
3584 + num_pages = (offset + num_bytes + PAGE_SIZE - 1) / PAGE_SIZE;
3586 + pages = kmalloc(sizeof(struct page *) * num_pages, GFP_KERNEL);
3587 + if (pages == NULL) {
3588 + vchiq_log_error(vchiq_arm_log_level,
3589 + "Unable to allocation memory for %d pages\n",
3594 + down_read(¤t->mm->mmap_sem);
3595 + rc = get_user_pages(current, /* task */
3596 + current->mm, /* mm */
3597 + (unsigned long)virt_addr, /* start */
3598 + num_pages, /* len */
3601 + pages, /* pages (array of page pointers) */
3603 + up_read(¤t->mm->mmap_sem);
3608 + while (offset < end_offset) {
3610 + int page_offset = offset % PAGE_SIZE;
3611 + page_idx = offset / PAGE_SIZE;
3613 + if (page_idx != prev_idx) {
3617 + page = pages[page_idx];
3618 + kmapped_virt_ptr = kmap(page);
3620 + prev_idx = page_idx;
3623 + if (vchiq_arm_log_level >= VCHIQ_LOG_TRACE)
3624 + vchiq_log_dump_mem("ph",
3625 + (uint32_t)(unsigned long)&kmapped_virt_ptr[
3627 + &kmapped_virt_ptr[page_offset], 16);
3634 + for (page_idx = 0; page_idx < num_pages; page_idx++)
3635 + page_cache_release(pages[page_idx]);
3640 +/****************************************************************************
3644 +***************************************************************************/
3647 +vchiq_read(struct file *file, char __user *buf,
3648 + size_t count, loff_t *ppos)
3650 + DUMP_CONTEXT_T context;
3651 + context.buf = buf;
3652 + context.actual = 0;
3653 + context.space = count;
3654 + context.offset = *ppos;
3656 + vchiq_dump_state(&context, &g_state);
3658 + *ppos += context.actual;
3660 + return context.actual;
3664 +vchiq_get_state(void)
3667 + if (g_state.remote == NULL)
3668 + printk(KERN_ERR "%s: g_state.remote == NULL\n", __func__);
3669 + else if (g_state.remote->initialised != 1)
3670 + printk(KERN_NOTICE "%s: g_state.remote->initialised != 1 (%d)\n",
3671 + __func__, g_state.remote->initialised);
3673 + return ((g_state.remote != NULL) &&
3674 + (g_state.remote->initialised == 1)) ? &g_state : NULL;
3677 +static const struct file_operations
3679 + .owner = THIS_MODULE,
3680 + .unlocked_ioctl = vchiq_ioctl,
3681 + .open = vchiq_open,
3682 + .release = vchiq_release,
3683 + .read = vchiq_read
3687 + * Autosuspend related functionality
3691 +vchiq_videocore_wanted(VCHIQ_STATE_T *state)
3693 + VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
3695 + /* autosuspend not supported - always return wanted */
3697 + else if (arm_state->blocked_count)
3699 + else if (!arm_state->videocore_use_count)
3700 + /* usage count zero - check for override unless we're forcing */
3701 + if (arm_state->resume_blocked)
3704 + return vchiq_platform_videocore_wanted(state);
3706 + /* non-zero usage count - videocore still required */
3710 +static VCHIQ_STATUS_T
3711 +vchiq_keepalive_vchiq_callback(VCHIQ_REASON_T reason,
3712 + VCHIQ_HEADER_T *header,
3713 + VCHIQ_SERVICE_HANDLE_T service_user,
3716 + vchiq_log_error(vchiq_susp_log_level,
3717 + "%s callback reason %d", __func__, reason);
3722 +vchiq_keepalive_thread_func(void *v)
3724 + VCHIQ_STATE_T *state = (VCHIQ_STATE_T *) v;
3725 + VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
3727 + VCHIQ_STATUS_T status;
3728 + VCHIQ_INSTANCE_T instance;
3729 + VCHIQ_SERVICE_HANDLE_T ka_handle;
3731 + VCHIQ_SERVICE_PARAMS_T params = {
3732 + .fourcc = VCHIQ_MAKE_FOURCC('K', 'E', 'E', 'P'),
3733 + .callback = vchiq_keepalive_vchiq_callback,
3734 + .version = KEEPALIVE_VER,
3735 + .version_min = KEEPALIVE_VER_MIN
3738 + status = vchiq_initialise(&instance);
3739 + if (status != VCHIQ_SUCCESS) {
3740 + vchiq_log_error(vchiq_susp_log_level,
3741 + "%s vchiq_initialise failed %d", __func__, status);
3745 + status = vchiq_connect(instance);
3746 + if (status != VCHIQ_SUCCESS) {
3747 + vchiq_log_error(vchiq_susp_log_level,
3748 + "%s vchiq_connect failed %d", __func__, status);
3752 + status = vchiq_add_service(instance, ¶ms, &ka_handle);
3753 + if (status != VCHIQ_SUCCESS) {
3754 + vchiq_log_error(vchiq_susp_log_level,
3755 + "%s vchiq_open_service failed %d", __func__, status);
3760 + long rc = 0, uc = 0;
3761 + if (wait_for_completion_interruptible(&arm_state->ka_evt)
3763 + vchiq_log_error(vchiq_susp_log_level,
3764 + "%s interrupted", __func__);
3765 + flush_signals(current);
3769 + /* read and clear counters. Do release_count then use_count to
3770 + * prevent getting more releases than uses */
3771 + rc = atomic_xchg(&arm_state->ka_release_count, 0);
3772 + uc = atomic_xchg(&arm_state->ka_use_count, 0);
3774 + /* Call use/release service the requisite number of times.
3775 + * Process use before release so use counts don't go negative */
3777 + atomic_inc(&arm_state->ka_use_ack_count);
3778 + status = vchiq_use_service(ka_handle);
3779 + if (status != VCHIQ_SUCCESS) {
3780 + vchiq_log_error(vchiq_susp_log_level,
3781 + "%s vchiq_use_service error %d",
3782 + __func__, status);
3786 + status = vchiq_release_service(ka_handle);
3787 + if (status != VCHIQ_SUCCESS) {
3788 + vchiq_log_error(vchiq_susp_log_level,
3789 + "%s vchiq_release_service error %d",
3790 + __func__, status);
3796 + vchiq_shutdown(instance);
3804 +vchiq_arm_init_state(VCHIQ_STATE_T *state, VCHIQ_ARM_STATE_T *arm_state)
3806 + VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
3809 + rwlock_init(&arm_state->susp_res_lock);
3811 + init_completion(&arm_state->ka_evt);
3812 + atomic_set(&arm_state->ka_use_count, 0);
3813 + atomic_set(&arm_state->ka_use_ack_count, 0);
3814 + atomic_set(&arm_state->ka_release_count, 0);
3816 + init_completion(&arm_state->vc_suspend_complete);
3818 + init_completion(&arm_state->vc_resume_complete);
3819 + /* Initialise to 'done' state. We only want to block on resume
3820 + * completion while videocore is suspended. */
3821 + set_resume_state(arm_state, VC_RESUME_RESUMED);
3823 + init_completion(&arm_state->resume_blocker);
3824 + /* Initialise to 'done' state. We only want to block on this
3825 + * completion while resume is blocked */
3826 + complete_all(&arm_state->resume_blocker);
3828 + init_completion(&arm_state->blocked_blocker);
3829 + /* Initialise to 'done' state. We only want to block on this
3830 + * completion while things are waiting on the resume blocker */
3831 + complete_all(&arm_state->blocked_blocker);
3833 + arm_state->suspend_timer_timeout = SUSPEND_TIMER_TIMEOUT_MS;
3834 + arm_state->suspend_timer_running = 0;
3835 + init_timer(&arm_state->suspend_timer);
3836 + arm_state->suspend_timer.data = (unsigned long)(state);
3837 + arm_state->suspend_timer.function = suspend_timer_callback;
3839 + arm_state->first_connect = 0;
3846 +** Functions to modify the state variables;
3847 +** set_suspend_state
3848 +** set_resume_state
3850 +** There are more state variables than we might like, so ensure they remain in
3851 +** step. Suspend and resume state are maintained separately, since most of
3852 +** these state machines can operate independently. However, there are a few
3853 +** states where state transitions in one state machine cause a reset to the
3854 +** other state machine. In addition, there are some completion events which
3855 +** need to occur on state machine reset and end-state(s), so these are also
3856 +** dealt with in these functions.
3858 +** In all states we set the state variable according to the input, but in some
3859 +** cases we perform additional steps outlined below;
3861 +** VC_SUSPEND_IDLE - Initialise the suspend completion at the same time.
3862 +** The suspend completion is completed after any suspend
3863 +** attempt. When we reset the state machine we also reset
3864 +** the completion. This reset occurs when videocore is
3865 +** resumed, and also if we initiate suspend after a suspend
3868 +** VC_SUSPEND_IN_PROGRESS - This state is considered the point of no return for
3869 +** suspend - ie from this point on we must try to suspend
3870 +** before resuming can occur. We therefore also reset the
3871 +** resume state machine to VC_RESUME_IDLE in this state.
3873 +** VC_SUSPEND_SUSPENDED - Suspend has completed successfully. Also call
3874 +** complete_all on the suspend completion to notify
3875 +** anything waiting for suspend to happen.
3877 +** VC_SUSPEND_REJECTED - Videocore rejected suspend. Videocore will also
3878 +** initiate resume, so no need to alter resume state.
3879 +** We call complete_all on the suspend completion to notify
3880 +** of suspend rejection.
3882 +** VC_SUSPEND_FAILED - We failed to initiate videocore suspend. We notify the
3883 +** suspend completion and reset the resume state machine.
3885 +** VC_RESUME_IDLE - Initialise the resume completion at the same time. The
3886 +** resume completion is in its 'done' state whenever
3887 +** videocore is running. Therefore, the VC_RESUME_IDLE state
3888 +** implies that videocore is suspended.
3889 +** Hence, any thread which needs to wait until videocore is
3890 +** running can wait on this completion - it will only block
3891 +** if videocore is suspended.
3893 +** VC_RESUME_RESUMED - Resume has completed successfully. Videocore is running.
3894 +** Call complete_all on the resume completion to unblock
3895 +** any threads waiting for resume. Also reset the suspend
3896 +** state machine to it's idle state.
3898 +** VC_RESUME_FAILED - Currently unused - no mechanism to fail resume exists.
3902 +set_suspend_state(VCHIQ_ARM_STATE_T *arm_state,
3903 + enum vc_suspend_status new_state)
3905 + /* set the state in all cases */
3906 + arm_state->vc_suspend_state = new_state;
3908 + /* state specific additional actions */
3909 + switch (new_state) {
3910 + case VC_SUSPEND_FORCE_CANCELED:
3911 + complete_all(&arm_state->vc_suspend_complete);
3913 + case VC_SUSPEND_REJECTED:
3914 + complete_all(&arm_state->vc_suspend_complete);
3916 + case VC_SUSPEND_FAILED:
3917 + complete_all(&arm_state->vc_suspend_complete);
3918 + arm_state->vc_resume_state = VC_RESUME_RESUMED;
3919 + complete_all(&arm_state->vc_resume_complete);
3921 + case VC_SUSPEND_IDLE:
3922 + reinit_completion(&arm_state->vc_suspend_complete);
3924 + case VC_SUSPEND_REQUESTED:
3926 + case VC_SUSPEND_IN_PROGRESS:
3927 + set_resume_state(arm_state, VC_RESUME_IDLE);
3929 + case VC_SUSPEND_SUSPENDED:
3930 + complete_all(&arm_state->vc_suspend_complete);
3939 +set_resume_state(VCHIQ_ARM_STATE_T *arm_state,
3940 + enum vc_resume_status new_state)
3942 + /* set the state in all cases */
3943 + arm_state->vc_resume_state = new_state;
3945 + /* state specific additional actions */
3946 + switch (new_state) {
3947 + case VC_RESUME_FAILED:
3949 + case VC_RESUME_IDLE:
3950 + reinit_completion(&arm_state->vc_resume_complete);
3952 + case VC_RESUME_REQUESTED:
3954 + case VC_RESUME_IN_PROGRESS:
3956 + case VC_RESUME_RESUMED:
3957 + complete_all(&arm_state->vc_resume_complete);
3958 + set_suspend_state(arm_state, VC_SUSPEND_IDLE);
3967 +/* should be called with the write lock held */
3969 +start_suspend_timer(VCHIQ_ARM_STATE_T *arm_state)
3971 + del_timer(&arm_state->suspend_timer);
3972 + arm_state->suspend_timer.expires = jiffies +
3973 + msecs_to_jiffies(arm_state->
3974 + suspend_timer_timeout);
3975 + add_timer(&arm_state->suspend_timer);
3976 + arm_state->suspend_timer_running = 1;
3979 +/* should be called with the write lock held */
3981 +stop_suspend_timer(VCHIQ_ARM_STATE_T *arm_state)
3983 + if (arm_state->suspend_timer_running) {
3984 + del_timer(&arm_state->suspend_timer);
3985 + arm_state->suspend_timer_running = 0;
3990 +need_resume(VCHIQ_STATE_T *state)
3992 + VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
3993 + return (arm_state->vc_suspend_state > VC_SUSPEND_IDLE) &&
3994 + (arm_state->vc_resume_state < VC_RESUME_REQUESTED) &&
3995 + vchiq_videocore_wanted(state);
3999 +block_resume(VCHIQ_ARM_STATE_T *arm_state)
4001 + int status = VCHIQ_SUCCESS;
4002 + const unsigned long timeout_val =
4003 + msecs_to_jiffies(FORCE_SUSPEND_TIMEOUT_MS);
4004 + int resume_count = 0;
4006 + /* Allow any threads which were blocked by the last force suspend to
4007 + * complete if they haven't already. Only give this one shot; if
4008 + * blocked_count is incremented after blocked_blocker is completed
4009 + * (which only happens when blocked_count hits 0) then those threads
4010 + * will have to wait until next time around */
4011 + if (arm_state->blocked_count) {
4012 + reinit_completion(&arm_state->blocked_blocker);
4013 + write_unlock_bh(&arm_state->susp_res_lock);
4014 + vchiq_log_info(vchiq_susp_log_level, "%s wait for previously "
4015 + "blocked clients", __func__);
4016 + if (wait_for_completion_interruptible_timeout(
4017 + &arm_state->blocked_blocker, timeout_val)
4019 + vchiq_log_error(vchiq_susp_log_level, "%s wait for "
4020 + "previously blocked clients failed" , __func__);
4021 + status = VCHIQ_ERROR;
4022 + write_lock_bh(&arm_state->susp_res_lock);
4025 + vchiq_log_info(vchiq_susp_log_level, "%s previously blocked "
4026 + "clients resumed", __func__);
4027 + write_lock_bh(&arm_state->susp_res_lock);
4030 + /* We need to wait for resume to complete if it's in process */
4031 + while (arm_state->vc_resume_state != VC_RESUME_RESUMED &&
4032 + arm_state->vc_resume_state > VC_RESUME_IDLE) {
4033 + if (resume_count > 1) {
4034 + status = VCHIQ_ERROR;
4035 + vchiq_log_error(vchiq_susp_log_level, "%s waited too "
4036 + "many times for resume" , __func__);
4039 + write_unlock_bh(&arm_state->susp_res_lock);
4040 + vchiq_log_info(vchiq_susp_log_level, "%s wait for resume",
4042 + if (wait_for_completion_interruptible_timeout(
4043 + &arm_state->vc_resume_complete, timeout_val)
4045 + vchiq_log_error(vchiq_susp_log_level, "%s wait for "
4046 + "resume failed (%s)", __func__,
4047 + resume_state_names[arm_state->vc_resume_state +
4048 + VC_RESUME_NUM_OFFSET]);
4049 + status = VCHIQ_ERROR;
4050 + write_lock_bh(&arm_state->susp_res_lock);
4053 + vchiq_log_info(vchiq_susp_log_level, "%s resumed", __func__);
4054 + write_lock_bh(&arm_state->susp_res_lock);
4057 + reinit_completion(&arm_state->resume_blocker);
4058 + arm_state->resume_blocked = 1;
4065 +unblock_resume(VCHIQ_ARM_STATE_T *arm_state)
4067 + complete_all(&arm_state->resume_blocker);
4068 + arm_state->resume_blocked = 0;
4071 +/* Initiate suspend via slot handler. Should be called with the write lock
4074 +vchiq_arm_vcsuspend(VCHIQ_STATE_T *state)
4076 + VCHIQ_STATUS_T status = VCHIQ_ERROR;
4077 + VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
4082 + vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
4083 + status = VCHIQ_SUCCESS;
4086 + switch (arm_state->vc_suspend_state) {
4087 + case VC_SUSPEND_REQUESTED:
4088 + vchiq_log_info(vchiq_susp_log_level, "%s: suspend already "
4089 + "requested", __func__);
4091 + case VC_SUSPEND_IN_PROGRESS:
4092 + vchiq_log_info(vchiq_susp_log_level, "%s: suspend already in "
4093 + "progress", __func__);
4097 + /* We don't expect to be in other states, so log but continue
4099 + vchiq_log_error(vchiq_susp_log_level,
4100 + "%s unexpected suspend state %s", __func__,
4101 + suspend_state_names[arm_state->vc_suspend_state +
4102 + VC_SUSPEND_NUM_OFFSET]);
4103 + /* fall through */
4104 + case VC_SUSPEND_REJECTED:
4105 + case VC_SUSPEND_FAILED:
4106 + /* Ensure any idle state actions have been run */
4107 + set_suspend_state(arm_state, VC_SUSPEND_IDLE);
4108 + /* fall through */
4109 + case VC_SUSPEND_IDLE:
4110 + vchiq_log_info(vchiq_susp_log_level,
4111 + "%s: suspending", __func__);
4112 + set_suspend_state(arm_state, VC_SUSPEND_REQUESTED);
4113 + /* kick the slot handler thread to initiate suspend */
4114 + request_poll(state, NULL, 0);
4119 + vchiq_log_trace(vchiq_susp_log_level, "%s exit %d", __func__, status);
4124 +vchiq_platform_check_suspend(VCHIQ_STATE_T *state)
4126 + VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
4132 + vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
4134 + write_lock_bh(&arm_state->susp_res_lock);
4135 + if (arm_state->vc_suspend_state == VC_SUSPEND_REQUESTED &&
4136 + arm_state->vc_resume_state == VC_RESUME_RESUMED) {
4137 + set_suspend_state(arm_state, VC_SUSPEND_IN_PROGRESS);
4140 + write_unlock_bh(&arm_state->susp_res_lock);
4143 + vchiq_platform_suspend(state);
4146 + vchiq_log_trace(vchiq_susp_log_level, "%s exit", __func__);
4152 +output_timeout_error(VCHIQ_STATE_T *state)
4154 + VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
4155 + char service_err[50] = "";
4156 + int vc_use_count = arm_state->videocore_use_count;
4157 + int active_services = state->unused_service;
4160 + if (!arm_state->videocore_use_count) {
4161 + snprintf(service_err, 50, " Videocore usecount is 0");
4164 + for (i = 0; i < active_services; i++) {
4165 + VCHIQ_SERVICE_T *service_ptr = state->services[i];
4166 + if (service_ptr && service_ptr->service_use_count &&
4167 + (service_ptr->srvstate != VCHIQ_SRVSTATE_FREE)) {
4168 + snprintf(service_err, 50, " %c%c%c%c(%d) service has "
4169 + "use count %d%s", VCHIQ_FOURCC_AS_4CHARS(
4170 + service_ptr->base.fourcc),
4171 + service_ptr->client_id,
4172 + service_ptr->service_use_count,
4173 + service_ptr->service_use_count ==
4174 + vc_use_count ? "" : " (+ more)");
4180 + vchiq_log_error(vchiq_susp_log_level,
4181 + "timed out waiting for vc suspend (%d).%s",
4182 + arm_state->autosuspend_override, service_err);
4186 +/* Try to get videocore into suspended state, regardless of autosuspend state.
4187 +** We don't actually force suspend, since videocore may get into a bad state
4188 +** if we force suspend at a bad time. Instead, we wait for autosuspend to
4189 +** determine a good point to suspend. If this doesn't happen within 100ms we
4192 +** Returns VCHIQ_SUCCESS if videocore suspended successfully, VCHIQ_RETRY if
4193 +** videocore failed to suspend in time or VCHIQ_ERROR if interrupted.
4196 +vchiq_arm_force_suspend(VCHIQ_STATE_T *state)
4198 + VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
4199 + VCHIQ_STATUS_T status = VCHIQ_ERROR;
4206 + vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
4208 + write_lock_bh(&arm_state->susp_res_lock);
4210 + status = block_resume(arm_state);
4211 + if (status != VCHIQ_SUCCESS)
4213 + if (arm_state->vc_suspend_state == VC_SUSPEND_SUSPENDED) {
4214 + /* Already suspended - just block resume and exit */
4215 + vchiq_log_info(vchiq_susp_log_level, "%s already suspended",
4217 + status = VCHIQ_SUCCESS;
4219 + } else if (arm_state->vc_suspend_state <= VC_SUSPEND_IDLE) {
4220 + /* initiate suspend immediately in the case that we're waiting
4221 + * for the timeout */
4222 + stop_suspend_timer(arm_state);
4223 + if (!vchiq_videocore_wanted(state)) {
4224 + vchiq_log_info(vchiq_susp_log_level, "%s videocore "
4225 + "idle, initiating suspend", __func__);
4226 + status = vchiq_arm_vcsuspend(state);
4227 + } else if (arm_state->autosuspend_override <
4228 + FORCE_SUSPEND_FAIL_MAX) {
4229 + vchiq_log_info(vchiq_susp_log_level, "%s letting "
4230 + "videocore go idle", __func__);
4231 + status = VCHIQ_SUCCESS;
4233 + vchiq_log_warning(vchiq_susp_log_level, "%s failed too "
4234 + "many times - attempting suspend", __func__);
4235 + status = vchiq_arm_vcsuspend(state);
4238 + vchiq_log_info(vchiq_susp_log_level, "%s videocore suspend "
4239 + "in progress - wait for completion", __func__);
4240 + status = VCHIQ_SUCCESS;
4243 + /* Wait for suspend to happen due to system idle (not forced..) */
4244 + if (status != VCHIQ_SUCCESS)
4245 + goto unblock_resume;
4248 + write_unlock_bh(&arm_state->susp_res_lock);
4250 + rc = wait_for_completion_interruptible_timeout(
4251 + &arm_state->vc_suspend_complete,
4252 + msecs_to_jiffies(FORCE_SUSPEND_TIMEOUT_MS));
4254 + write_lock_bh(&arm_state->susp_res_lock);
4256 + vchiq_log_warning(vchiq_susp_log_level, "%s "
4257 + "interrupted waiting for suspend", __func__);
4258 + status = VCHIQ_ERROR;
4259 + goto unblock_resume;
4260 + } else if (rc == 0) {
4261 + if (arm_state->vc_suspend_state > VC_SUSPEND_IDLE) {
4262 + /* Repeat timeout once if in progress */
4268 + arm_state->autosuspend_override++;
4269 + output_timeout_error(state);
4271 + status = VCHIQ_RETRY;
4272 + goto unblock_resume;
4274 + } while (0 < (repeat--));
4276 + /* Check and report state in case we need to abort ARM suspend */
4277 + if (arm_state->vc_suspend_state != VC_SUSPEND_SUSPENDED) {
4278 + status = VCHIQ_RETRY;
4279 + vchiq_log_error(vchiq_susp_log_level,
4280 + "%s videocore suspend failed (state %s)", __func__,
4281 + suspend_state_names[arm_state->vc_suspend_state +
4282 + VC_SUSPEND_NUM_OFFSET]);
4283 + /* Reset the state only if it's still in an error state.
4284 + * Something could have already initiated another suspend. */
4285 + if (arm_state->vc_suspend_state < VC_SUSPEND_IDLE)
4286 + set_suspend_state(arm_state, VC_SUSPEND_IDLE);
4288 + goto unblock_resume;
4291 + /* successfully suspended - unlock and exit */
4295 + /* all error states need to unblock resume before exit */
4296 + unblock_resume(arm_state);
4299 + write_unlock_bh(&arm_state->susp_res_lock);
4302 + vchiq_log_trace(vchiq_susp_log_level, "%s exit %d", __func__, status);
4307 +vchiq_check_suspend(VCHIQ_STATE_T *state)
4309 + VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
4314 + vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
4316 + write_lock_bh(&arm_state->susp_res_lock);
4317 + if (arm_state->vc_suspend_state != VC_SUSPEND_SUSPENDED &&
4318 + arm_state->first_connect &&
4319 + !vchiq_videocore_wanted(state)) {
4320 + vchiq_arm_vcsuspend(state);
4322 + write_unlock_bh(&arm_state->susp_res_lock);
4325 + vchiq_log_trace(vchiq_susp_log_level, "%s exit", __func__);
4331 +vchiq_arm_allow_resume(VCHIQ_STATE_T *state)
4333 + VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
4340 + vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
4342 + write_lock_bh(&arm_state->susp_res_lock);
4343 + unblock_resume(arm_state);
4344 + resume = vchiq_check_resume(state);
4345 + write_unlock_bh(&arm_state->susp_res_lock);
4348 + if (wait_for_completion_interruptible(
4349 + &arm_state->vc_resume_complete) < 0) {
4350 + vchiq_log_error(vchiq_susp_log_level,
4351 + "%s interrupted", __func__);
4352 + /* failed, cannot accurately derive suspend
4353 + * state, so exit early. */
4358 + read_lock_bh(&arm_state->susp_res_lock);
4359 + if (arm_state->vc_suspend_state == VC_SUSPEND_SUSPENDED) {
4360 + vchiq_log_info(vchiq_susp_log_level,
4361 + "%s: Videocore remains suspended", __func__);
4363 + vchiq_log_info(vchiq_susp_log_level,
4364 + "%s: Videocore resumed", __func__);
4367 + read_unlock_bh(&arm_state->susp_res_lock);
4369 + vchiq_log_trace(vchiq_susp_log_level, "%s exit %d", __func__, ret);
4373 +/* This function should be called with the write lock held */
4375 +vchiq_check_resume(VCHIQ_STATE_T *state)
4377 + VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
4383 + vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
4385 + if (need_resume(state)) {
4386 + set_resume_state(arm_state, VC_RESUME_REQUESTED);
4387 + request_poll(state, NULL, 0);
4392 + vchiq_log_trace(vchiq_susp_log_level, "%s exit", __func__);
4397 +vchiq_platform_check_resume(VCHIQ_STATE_T *state)
4399 + VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
4405 + vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
4407 + write_lock_bh(&arm_state->susp_res_lock);
4408 + if (arm_state->wake_address == 0) {
4409 + vchiq_log_info(vchiq_susp_log_level,
4410 + "%s: already awake", __func__);
4413 + if (arm_state->vc_resume_state == VC_RESUME_IN_PROGRESS) {
4414 + vchiq_log_info(vchiq_susp_log_level,
4415 + "%s: already resuming", __func__);
4419 + if (arm_state->vc_resume_state == VC_RESUME_REQUESTED) {
4420 + set_resume_state(arm_state, VC_RESUME_IN_PROGRESS);
4423 + vchiq_log_trace(vchiq_susp_log_level,
4424 + "%s: not resuming (resume state %s)", __func__,
4425 + resume_state_names[arm_state->vc_resume_state +
4426 + VC_RESUME_NUM_OFFSET]);
4429 + write_unlock_bh(&arm_state->susp_res_lock);
4432 + vchiq_platform_resume(state);
4435 + vchiq_log_trace(vchiq_susp_log_level, "%s exit", __func__);
4443 +vchiq_use_internal(VCHIQ_STATE_T *state, VCHIQ_SERVICE_T *service,
4444 + enum USE_TYPE_E use_type)
4446 + VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
4447 + VCHIQ_STATUS_T ret = VCHIQ_SUCCESS;
4450 + int local_uc, local_entity_uc;
4455 + vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
4457 + if (use_type == USE_TYPE_VCHIQ) {
4458 + sprintf(entity, "VCHIQ: ");
4459 + entity_uc = &arm_state->peer_use_count;
4460 + } else if (service) {
4461 + sprintf(entity, "%c%c%c%c:%03d",
4462 + VCHIQ_FOURCC_AS_4CHARS(service->base.fourcc),
4463 + service->client_id);
4464 + entity_uc = &service->service_use_count;
4466 + vchiq_log_error(vchiq_susp_log_level, "%s null service "
4468 + ret = VCHIQ_ERROR;
4472 + write_lock_bh(&arm_state->susp_res_lock);
4473 + while (arm_state->resume_blocked) {
4474 + /* If we call 'use' while force suspend is waiting for suspend,
4475 + * then we're about to block the thread which the force is
4476 + * waiting to complete, so we're bound to just time out. In this
4477 + * case, set the suspend state such that the wait will be
4478 + * canceled, so we can complete as quickly as possible. */
4479 + if (arm_state->resume_blocked && arm_state->vc_suspend_state ==
4480 + VC_SUSPEND_IDLE) {
4481 + set_suspend_state(arm_state, VC_SUSPEND_FORCE_CANCELED);
4484 + /* If suspend is already in progress then we need to block */
4485 + if (!try_wait_for_completion(&arm_state->resume_blocker)) {
4486 + /* Indicate that there are threads waiting on the resume
4487 + * blocker. These need to be allowed to complete before
4488 + * a _second_ call to force suspend can complete,
4489 + * otherwise low priority threads might never actually
4491 + arm_state->blocked_count++;
4492 + write_unlock_bh(&arm_state->susp_res_lock);
4493 + vchiq_log_info(vchiq_susp_log_level, "%s %s resume "
4494 + "blocked - waiting...", __func__, entity);
4495 + if (wait_for_completion_killable(
4496 + &arm_state->resume_blocker) != 0) {
4497 + vchiq_log_error(vchiq_susp_log_level, "%s %s "
4498 + "wait for resume blocker interrupted",
4499 + __func__, entity);
4500 + ret = VCHIQ_ERROR;
4501 + write_lock_bh(&arm_state->susp_res_lock);
4502 + arm_state->blocked_count--;
4503 + write_unlock_bh(&arm_state->susp_res_lock);
4506 + vchiq_log_info(vchiq_susp_log_level, "%s %s resume "
4507 + "unblocked", __func__, entity);
4508 + write_lock_bh(&arm_state->susp_res_lock);
4509 + if (--arm_state->blocked_count == 0)
4510 + complete_all(&arm_state->blocked_blocker);
4514 + stop_suspend_timer(arm_state);
4516 + local_uc = ++arm_state->videocore_use_count;
4517 + local_entity_uc = ++(*entity_uc);
4519 + /* If there's a pending request which hasn't yet been serviced then
4520 + * just clear it. If we're past VC_SUSPEND_REQUESTED state then
4521 + * vc_resume_complete will block until we either resume or fail to
4523 + if (arm_state->vc_suspend_state <= VC_SUSPEND_REQUESTED)
4524 + set_suspend_state(arm_state, VC_SUSPEND_IDLE);
4526 + if ((use_type != USE_TYPE_SERVICE_NO_RESUME) && need_resume(state)) {
4527 + set_resume_state(arm_state, VC_RESUME_REQUESTED);
4528 + vchiq_log_info(vchiq_susp_log_level,
4529 + "%s %s count %d, state count %d",
4530 + __func__, entity, local_entity_uc, local_uc);
4531 + request_poll(state, NULL, 0);
4533 + vchiq_log_trace(vchiq_susp_log_level,
4534 + "%s %s count %d, state count %d",
4535 + __func__, entity, *entity_uc, local_uc);
4538 + write_unlock_bh(&arm_state->susp_res_lock);
4540 + /* Completion is in a done state when we're not suspended, so this won't
4541 + * block for the non-suspended case. */
4542 + if (!try_wait_for_completion(&arm_state->vc_resume_complete)) {
4543 + vchiq_log_info(vchiq_susp_log_level, "%s %s wait for resume",
4544 + __func__, entity);
4545 + if (wait_for_completion_killable(
4546 + &arm_state->vc_resume_complete) != 0) {
4547 + vchiq_log_error(vchiq_susp_log_level, "%s %s wait for "
4548 + "resume interrupted", __func__, entity);
4549 + ret = VCHIQ_ERROR;
4552 + vchiq_log_info(vchiq_susp_log_level, "%s %s resumed", __func__,
4556 + if (ret == VCHIQ_SUCCESS) {
4557 + VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
4558 + long ack_cnt = atomic_xchg(&arm_state->ka_use_ack_count, 0);
4559 + while (ack_cnt && (status == VCHIQ_SUCCESS)) {
4560 + /* Send the use notify to videocore */
4561 + status = vchiq_send_remote_use_active(state);
4562 + if (status == VCHIQ_SUCCESS)
4565 + atomic_add(ack_cnt,
4566 + &arm_state->ka_use_ack_count);
4571 + vchiq_log_trace(vchiq_susp_log_level, "%s exit %d", __func__, ret);
4576 +vchiq_release_internal(VCHIQ_STATE_T *state, VCHIQ_SERVICE_T *service)
4578 + VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
4579 + VCHIQ_STATUS_T ret = VCHIQ_SUCCESS;
4582 + int local_uc, local_entity_uc;
4587 + vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
4590 + sprintf(entity, "%c%c%c%c:%03d",
4591 + VCHIQ_FOURCC_AS_4CHARS(service->base.fourcc),
4592 + service->client_id);
4593 + entity_uc = &service->service_use_count;
4595 + sprintf(entity, "PEER: ");
4596 + entity_uc = &arm_state->peer_use_count;
4599 + write_lock_bh(&arm_state->susp_res_lock);
4600 + if (!arm_state->videocore_use_count || !(*entity_uc)) {
4601 + /* Don't use BUG_ON - don't allow user thread to crash kernel */
4602 + WARN_ON(!arm_state->videocore_use_count);
4603 + WARN_ON(!(*entity_uc));
4604 + ret = VCHIQ_ERROR;
4607 + local_uc = --arm_state->videocore_use_count;
4608 + local_entity_uc = --(*entity_uc);
4610 + if (!vchiq_videocore_wanted(state)) {
4611 + if (vchiq_platform_use_suspend_timer() &&
4612 + !arm_state->resume_blocked) {
4613 + /* Only use the timer if we're not trying to force
4614 + * suspend (=> resume_blocked) */
4615 + start_suspend_timer(arm_state);
4617 + vchiq_log_info(vchiq_susp_log_level,
4618 + "%s %s count %d, state count %d - suspending",
4619 + __func__, entity, *entity_uc,
4620 + arm_state->videocore_use_count);
4621 + vchiq_arm_vcsuspend(state);
4624 + vchiq_log_trace(vchiq_susp_log_level,
4625 + "%s %s count %d, state count %d",
4626 + __func__, entity, *entity_uc,
4627 + arm_state->videocore_use_count);
4630 + write_unlock_bh(&arm_state->susp_res_lock);
4633 + vchiq_log_trace(vchiq_susp_log_level, "%s exit %d", __func__, ret);
4638 +vchiq_on_remote_use(VCHIQ_STATE_T *state)
4640 + VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
4641 + vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
4642 + atomic_inc(&arm_state->ka_use_count);
4643 + complete(&arm_state->ka_evt);
4647 +vchiq_on_remote_release(VCHIQ_STATE_T *state)
4649 + VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
4650 + vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
4651 + atomic_inc(&arm_state->ka_release_count);
4652 + complete(&arm_state->ka_evt);
4656 +vchiq_use_service_internal(VCHIQ_SERVICE_T *service)
4658 + return vchiq_use_internal(service->state, service, USE_TYPE_SERVICE);
4662 +vchiq_release_service_internal(VCHIQ_SERVICE_T *service)
4664 + return vchiq_release_internal(service->state, service);
4667 +static void suspend_timer_callback(unsigned long context)
4669 + VCHIQ_STATE_T *state = (VCHIQ_STATE_T *)context;
4670 + VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
4673 + vchiq_log_info(vchiq_susp_log_level,
4674 + "%s - suspend timer expired - check suspend", __func__);
4675 + vchiq_check_suspend(state);
4681 +vchiq_use_service_no_resume(VCHIQ_SERVICE_HANDLE_T handle)
4683 + VCHIQ_STATUS_T ret = VCHIQ_ERROR;
4684 + VCHIQ_SERVICE_T *service = find_service_by_handle(handle);
4686 + ret = vchiq_use_internal(service->state, service,
4687 + USE_TYPE_SERVICE_NO_RESUME);
4688 + unlock_service(service);
4694 +vchiq_use_service(VCHIQ_SERVICE_HANDLE_T handle)
4696 + VCHIQ_STATUS_T ret = VCHIQ_ERROR;
4697 + VCHIQ_SERVICE_T *service = find_service_by_handle(handle);
4699 + ret = vchiq_use_internal(service->state, service,
4700 + USE_TYPE_SERVICE);
4701 + unlock_service(service);
4707 +vchiq_release_service(VCHIQ_SERVICE_HANDLE_T handle)
4709 + VCHIQ_STATUS_T ret = VCHIQ_ERROR;
4710 + VCHIQ_SERVICE_T *service = find_service_by_handle(handle);
4712 + ret = vchiq_release_internal(service->state, service);
4713 + unlock_service(service);
4719 +vchiq_dump_service_use_state(VCHIQ_STATE_T *state)
4721 + VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
4723 + /* Only dump 64 services */
4724 + static const int local_max_services = 64;
4725 + /* If there's more than 64 services, only dump ones with
4726 + * non-zero counts */
4727 + int only_nonzero = 0;
4728 + static const char *nz = "<-- preventing suspend";
4730 + enum vc_suspend_status vc_suspend_state;
4731 + enum vc_resume_status vc_resume_state;
4734 + int active_services;
4735 + struct service_data_struct {
4739 + } service_data[local_max_services];
4744 + read_lock_bh(&arm_state->susp_res_lock);
4745 + vc_suspend_state = arm_state->vc_suspend_state;
4746 + vc_resume_state = arm_state->vc_resume_state;
4747 + peer_count = arm_state->peer_use_count;
4748 + vc_use_count = arm_state->videocore_use_count;
4749 + active_services = state->unused_service;
4750 + if (active_services > local_max_services)
4753 + for (i = 0; (i < active_services) && (j < local_max_services); i++) {
4754 + VCHIQ_SERVICE_T *service_ptr = state->services[i];
4758 + if (only_nonzero && !service_ptr->service_use_count)
4761 + if (service_ptr->srvstate != VCHIQ_SRVSTATE_FREE) {
4762 + service_data[j].fourcc = service_ptr->base.fourcc;
4763 + service_data[j].clientid = service_ptr->client_id;
4764 + service_data[j++].use_count = service_ptr->
4765 + service_use_count;
4769 + read_unlock_bh(&arm_state->susp_res_lock);
4771 + vchiq_log_warning(vchiq_susp_log_level,
4772 + "-- Videcore suspend state: %s --",
4773 + suspend_state_names[vc_suspend_state + VC_SUSPEND_NUM_OFFSET]);
4774 + vchiq_log_warning(vchiq_susp_log_level,
4775 + "-- Videcore resume state: %s --",
4776 + resume_state_names[vc_resume_state + VC_RESUME_NUM_OFFSET]);
4779 + vchiq_log_warning(vchiq_susp_log_level, "Too many active "
4780 + "services (%d). Only dumping up to first %d services "
4781 + "with non-zero use-count", active_services,
4782 + local_max_services);
4784 + for (i = 0; i < j; i++) {
4785 + vchiq_log_warning(vchiq_susp_log_level,
4786 + "----- %c%c%c%c:%d service count %d %s",
4787 + VCHIQ_FOURCC_AS_4CHARS(service_data[i].fourcc),
4788 + service_data[i].clientid,
4789 + service_data[i].use_count,
4790 + service_data[i].use_count ? nz : "");
4792 + vchiq_log_warning(vchiq_susp_log_level,
4793 + "----- VCHIQ use count count %d", peer_count);
4794 + vchiq_log_warning(vchiq_susp_log_level,
4795 + "--- Overall vchiq instance use count %d", vc_use_count);
4797 + vchiq_dump_platform_use_state(state);
4801 +vchiq_check_service(VCHIQ_SERVICE_T *service)
4803 + VCHIQ_ARM_STATE_T *arm_state;
4804 + VCHIQ_STATUS_T ret = VCHIQ_ERROR;
4806 + if (!service || !service->state)
4809 + vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
4811 + arm_state = vchiq_platform_get_arm_state(service->state);
4813 + read_lock_bh(&arm_state->susp_res_lock);
4814 + if (service->service_use_count)
4815 + ret = VCHIQ_SUCCESS;
4816 + read_unlock_bh(&arm_state->susp_res_lock);
4818 + if (ret == VCHIQ_ERROR) {
4819 + vchiq_log_error(vchiq_susp_log_level,
4820 + "%s ERROR - %c%c%c%c:%d service count %d, "
4821 + "state count %d, videocore suspend state %s", __func__,
4822 + VCHIQ_FOURCC_AS_4CHARS(service->base.fourcc),
4823 + service->client_id, service->service_use_count,
4824 + arm_state->videocore_use_count,
4825 + suspend_state_names[arm_state->vc_suspend_state +
4826 + VC_SUSPEND_NUM_OFFSET]);
4827 + vchiq_dump_service_use_state(service->state);
4833 +/* stub functions */
4834 +void vchiq_on_remote_use_active(VCHIQ_STATE_T *state)
4839 +void vchiq_platform_conn_state_changed(VCHIQ_STATE_T *state,
4840 + VCHIQ_CONNSTATE_T oldstate, VCHIQ_CONNSTATE_T newstate)
4842 + VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
4843 + vchiq_log_info(vchiq_susp_log_level, "%d: %s->%s", state->id,
4844 + get_conn_state_name(oldstate), get_conn_state_name(newstate));
4845 + if (state->conn_state == VCHIQ_CONNSTATE_CONNECTED) {
4846 + write_lock_bh(&arm_state->susp_res_lock);
4847 + if (!arm_state->first_connect) {
4848 + char threadname[10];
4849 + arm_state->first_connect = 1;
4850 + write_unlock_bh(&arm_state->susp_res_lock);
4851 + snprintf(threadname, sizeof(threadname), "VCHIQka-%d",
4853 + arm_state->ka_thread = kthread_create(
4854 + &vchiq_keepalive_thread_func,
4857 + if (arm_state->ka_thread == NULL) {
4858 + vchiq_log_error(vchiq_susp_log_level,
4859 + "vchiq: FATAL: couldn't create thread %s",
4862 + wake_up_process(arm_state->ka_thread);
4865 + write_unlock_bh(&arm_state->susp_res_lock);
4870 +/****************************************************************************
4872 +* vchiq_init - called when the module is loaded.
4874 +***************************************************************************/
4882 + /* create proc entries */
4883 + err = vchiq_proc_init();
4885 + goto failed_proc_init;
4887 + err = alloc_chrdev_region(&vchiq_devid, VCHIQ_MINOR, 1, DEVICE_NAME);
4889 + vchiq_log_error(vchiq_arm_log_level,
4890 + "Unable to allocate device number");
4891 + goto failed_alloc_chrdev;
4893 + cdev_init(&vchiq_cdev, &vchiq_fops);
4894 + vchiq_cdev.owner = THIS_MODULE;
4895 + err = cdev_add(&vchiq_cdev, vchiq_devid, 1);
4897 + vchiq_log_error(vchiq_arm_log_level,
4898 + "Unable to register device");
4899 + goto failed_cdev_add;
4902 + /* create sysfs entries */
4903 + vchiq_class = class_create(THIS_MODULE, DEVICE_NAME);
4904 + ptr_err = vchiq_class;
4905 + if (IS_ERR(ptr_err))
4906 + goto failed_class_create;
4908 + vchiq_dev = device_create(vchiq_class, NULL,
4909 + vchiq_devid, NULL, "vchiq");
4910 + ptr_err = vchiq_dev;
4911 + if (IS_ERR(ptr_err))
4912 + goto failed_device_create;
4914 + err = vchiq_platform_init(&g_state);
4916 + goto failed_platform_init;
4918 + vchiq_log_info(vchiq_arm_log_level,
4919 + "vchiq: initialised - version %d (min %d), device %d.%d",
4920 + VCHIQ_VERSION, VCHIQ_VERSION_MIN,
4921 + MAJOR(vchiq_devid), MINOR(vchiq_devid));
4925 +failed_platform_init:
4926 + device_destroy(vchiq_class, vchiq_devid);
4927 +failed_device_create:
4928 + class_destroy(vchiq_class);
4929 +failed_class_create:
4930 + cdev_del(&vchiq_cdev);
4931 + err = PTR_ERR(ptr_err);
4933 + unregister_chrdev_region(vchiq_devid, 1);
4934 +failed_alloc_chrdev:
4935 + vchiq_proc_deinit();
4937 + vchiq_log_warning(vchiq_arm_log_level, "could not load vchiq");
4941 +static int vchiq_instance_get_use_count(VCHIQ_INSTANCE_T instance)
4943 + VCHIQ_SERVICE_T *service;
4944 + int use_count = 0, i;
4946 + while ((service = next_service_by_instance(instance->state,
4947 + instance, &i)) != NULL) {
4948 + use_count += service->service_use_count;
4949 + unlock_service(service);
4954 +/* read the per-process use-count */
4955 +static int proc_read_use_count(char *page, char **start,
4956 + off_t off, int count,
4957 + int *eof, void *data)
4959 + VCHIQ_INSTANCE_T instance = data;
4960 + int len, use_count;
4962 + use_count = vchiq_instance_get_use_count(instance);
4963 + len = snprintf(page+off, count, "%d\n", use_count);
4968 +/* add an instance (process) to the proc entries */
4969 +static int vchiq_proc_add_instance(VCHIQ_INSTANCE_T instance)
4975 + struct proc_dir_entry *top, *use_count;
4976 + struct proc_dir_entry *clients = vchiq_clients_top();
4977 + int pid = instance->pid;
4979 + snprintf(pidstr, sizeof(pidstr), "%d", pid);
4980 + top = proc_mkdir(pidstr, clients);
4984 + use_count = create_proc_read_entry("use_count",
4986 + proc_read_use_count,
4989 + goto fail_use_count;
4991 + instance->proc_entry = top;
4996 + remove_proc_entry(top->name, clients);
5002 +static void vchiq_proc_remove_instance(VCHIQ_INSTANCE_T instance)
5005 + struct proc_dir_entry *clients = vchiq_clients_top();
5006 + remove_proc_entry("use_count", instance->proc_entry);
5007 + remove_proc_entry(instance->proc_entry->name, clients);
5011 +/****************************************************************************
5013 +* vchiq_exit - called when the module is unloaded.
5015 +***************************************************************************/
5020 + vchiq_platform_exit(&g_state);
5021 + device_destroy(vchiq_class, vchiq_devid);
5022 + class_destroy(vchiq_class);
5023 + cdev_del(&vchiq_cdev);
5024 + unregister_chrdev_region(vchiq_devid, 1);
5027 +module_init(vchiq_init);
5028 +module_exit(vchiq_exit);
5029 +MODULE_LICENSE("GPL");
5030 +MODULE_AUTHOR("Broadcom Corporation");
5032 +++ b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_arm.h
5035 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
5037 + * Redistribution and use in source and binary forms, with or without
5038 + * modification, are permitted provided that the following conditions
5040 + * 1. Redistributions of source code must retain the above copyright
5041 + * notice, this list of conditions, and the following disclaimer,
5042 + * without modification.
5043 + * 2. Redistributions in binary form must reproduce the above copyright
5044 + * notice, this list of conditions and the following disclaimer in the
5045 + * documentation and/or other materials provided with the distribution.
5046 + * 3. The names of the above-listed copyright holders may not be used
5047 + * to endorse or promote products derived from this software without
5048 + * specific prior written permission.
5050 + * ALTERNATIVELY, this software may be distributed under the terms of the
5051 + * GNU General Public License ("GPL") version 2, as published by the Free
5052 + * Software Foundation.
5054 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
5055 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
5056 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
5057 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
5058 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
5059 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
5060 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
5061 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
5062 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
5063 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
5064 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
5067 +#ifndef VCHIQ_ARM_H
5068 +#define VCHIQ_ARM_H
5070 +#include <linux/mutex.h>
5071 +#include <linux/semaphore.h>
5072 +#include <linux/atomic.h>
5073 +#include "vchiq_core.h"
5076 +enum vc_suspend_status {
5077 + VC_SUSPEND_FORCE_CANCELED = -3, /* Force suspend canceled, too busy */
5078 + VC_SUSPEND_REJECTED = -2, /* Videocore rejected suspend request */
5079 + VC_SUSPEND_FAILED = -1, /* Videocore suspend failed */
5080 + VC_SUSPEND_IDLE = 0, /* VC active, no suspend actions */
5081 + VC_SUSPEND_REQUESTED, /* User has requested suspend */
5082 + VC_SUSPEND_IN_PROGRESS, /* Slot handler has recvd suspend request */
5083 + VC_SUSPEND_SUSPENDED /* Videocore suspend succeeded */
5086 +enum vc_resume_status {
5087 + VC_RESUME_FAILED = -1, /* Videocore resume failed */
5088 + VC_RESUME_IDLE = 0, /* VC suspended, no resume actions */
5089 + VC_RESUME_REQUESTED, /* User has requested resume */
5090 + VC_RESUME_IN_PROGRESS, /* Slot handler has received resume request */
5091 + VC_RESUME_RESUMED /* Videocore resumed successfully (active) */
5097 + USE_TYPE_SERVICE_NO_RESUME,
5103 +typedef struct vchiq_arm_state_struct {
5104 + /* Keepalive-related data */
5105 + struct task_struct *ka_thread;
5106 + struct completion ka_evt;
5107 + atomic_t ka_use_count;
5108 + atomic_t ka_use_ack_count;
5109 + atomic_t ka_release_count;
5111 + struct completion vc_suspend_complete;
5112 + struct completion vc_resume_complete;
5114 + rwlock_t susp_res_lock;
5115 + enum vc_suspend_status vc_suspend_state;
5116 + enum vc_resume_status vc_resume_state;
5118 + unsigned int wake_address;
5120 + struct timer_list suspend_timer;
5121 + int suspend_timer_timeout;
5122 + int suspend_timer_running;
5124 + /* Global use count for videocore.
5125 + ** This is equal to the sum of the use counts for all services. When
5126 + ** this hits zero the videocore suspend procedure will be initiated.
5128 + int videocore_use_count;
5130 + /* Use count to track requests from videocore peer.
5131 + ** This use count is not associated with a service, so needs to be
5132 + ** tracked separately with the state.
5134 + int peer_use_count;
5136 + /* Flag to indicate whether resume is blocked. This happens when the
5137 + ** ARM is suspending
5139 + struct completion resume_blocker;
5140 + int resume_blocked;
5141 + struct completion blocked_blocker;
5142 + int blocked_count;
5144 + int autosuspend_override;
5146 + /* Flag to indicate that the first vchiq connect has made it through.
5147 + ** This means that both sides should be fully ready, and we should
5148 + ** be able to suspend after this point.
5150 + int first_connect;
5152 + unsigned long long suspend_start_time;
5153 + unsigned long long sleep_start_time;
5154 + unsigned long long resume_start_time;
5155 + unsigned long long last_wake_time;
5157 +} VCHIQ_ARM_STATE_T;
5159 +extern int vchiq_arm_log_level;
5160 +extern int vchiq_susp_log_level;
5163 +vchiq_platform_init(VCHIQ_STATE_T *state);
5166 +vchiq_platform_exit(VCHIQ_STATE_T *state);
5168 +extern VCHIQ_STATE_T *
5169 +vchiq_get_state(void);
5171 +extern VCHIQ_STATUS_T
5172 +vchiq_arm_vcsuspend(VCHIQ_STATE_T *state);
5174 +extern VCHIQ_STATUS_T
5175 +vchiq_arm_force_suspend(VCHIQ_STATE_T *state);
5178 +vchiq_arm_allow_resume(VCHIQ_STATE_T *state);
5180 +extern VCHIQ_STATUS_T
5181 +vchiq_arm_vcresume(VCHIQ_STATE_T *state);
5183 +extern VCHIQ_STATUS_T
5184 +vchiq_arm_init_state(VCHIQ_STATE_T *state, VCHIQ_ARM_STATE_T *arm_state);
5187 +vchiq_check_resume(VCHIQ_STATE_T *state);
5190 +vchiq_check_suspend(VCHIQ_STATE_T *state);
5192 +extern VCHIQ_STATUS_T
5193 +vchiq_use_service(VCHIQ_SERVICE_HANDLE_T handle);
5195 +extern VCHIQ_STATUS_T
5196 +vchiq_release_service(VCHIQ_SERVICE_HANDLE_T handle);
5198 +extern VCHIQ_STATUS_T
5199 +vchiq_check_service(VCHIQ_SERVICE_T *service);
5201 +extern VCHIQ_STATUS_T
5202 +vchiq_platform_suspend(VCHIQ_STATE_T *state);
5205 +vchiq_platform_videocore_wanted(VCHIQ_STATE_T *state);
5208 +vchiq_platform_use_suspend_timer(void);
5211 +vchiq_dump_platform_use_state(VCHIQ_STATE_T *state);
5214 +vchiq_dump_service_use_state(VCHIQ_STATE_T *state);
5216 +extern VCHIQ_ARM_STATE_T*
5217 +vchiq_platform_get_arm_state(VCHIQ_STATE_T *state);
5220 +vchiq_videocore_wanted(VCHIQ_STATE_T *state);
5222 +extern VCHIQ_STATUS_T
5223 +vchiq_use_internal(VCHIQ_STATE_T *state, VCHIQ_SERVICE_T *service,
5224 + enum USE_TYPE_E use_type);
5225 +extern VCHIQ_STATUS_T
5226 +vchiq_release_internal(VCHIQ_STATE_T *state, VCHIQ_SERVICE_T *service);
5229 +set_suspend_state(VCHIQ_ARM_STATE_T *arm_state,
5230 + enum vc_suspend_status new_state);
5233 +set_resume_state(VCHIQ_ARM_STATE_T *arm_state,
5234 + enum vc_resume_status new_state);
5237 +start_suspend_timer(VCHIQ_ARM_STATE_T *arm_state);
5239 +extern int vchiq_proc_init(void);
5240 +extern void vchiq_proc_deinit(void);
5241 +extern struct proc_dir_entry *vchiq_proc_top(void);
5242 +extern struct proc_dir_entry *vchiq_clients_top(void);
5245 +#endif /* VCHIQ_ARM_H */
5247 +++ b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_build_info.h
5250 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
5252 + * Redistribution and use in source and binary forms, with or without
5253 + * modification, are permitted provided that the following conditions
5255 + * 1. Redistributions of source code must retain the above copyright
5256 + * notice, this list of conditions, and the following disclaimer,
5257 + * without modification.
5258 + * 2. Redistributions in binary form must reproduce the above copyright
5259 + * notice, this list of conditions and the following disclaimer in the
5260 + * documentation and/or other materials provided with the distribution.
5261 + * 3. The names of the above-listed copyright holders may not be used
5262 + * to endorse or promote products derived from this software without
5263 + * specific prior written permission.
5265 + * ALTERNATIVELY, this software may be distributed under the terms of the
5266 + * GNU General Public License ("GPL") version 2, as published by the Free
5267 + * Software Foundation.
5269 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
5270 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
5271 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
5272 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
5273 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
5274 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
5275 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
5276 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
5277 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
5278 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
5279 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
5282 +const char *vchiq_get_build_hostname(void);
5283 +const char *vchiq_get_build_version(void);
5284 +const char *vchiq_get_build_time(void);
5285 +const char *vchiq_get_build_date(void);
5287 +++ b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_cfg.h
5290 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
5292 + * Redistribution and use in source and binary forms, with or without
5293 + * modification, are permitted provided that the following conditions
5295 + * 1. Redistributions of source code must retain the above copyright
5296 + * notice, this list of conditions, and the following disclaimer,
5297 + * without modification.
5298 + * 2. Redistributions in binary form must reproduce the above copyright
5299 + * notice, this list of conditions and the following disclaimer in the
5300 + * documentation and/or other materials provided with the distribution.
5301 + * 3. The names of the above-listed copyright holders may not be used
5302 + * to endorse or promote products derived from this software without
5303 + * specific prior written permission.
5305 + * ALTERNATIVELY, this software may be distributed under the terms of the
5306 + * GNU General Public License ("GPL") version 2, as published by the Free
5307 + * Software Foundation.
5309 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
5310 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
5311 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
5312 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
5313 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
5314 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
5315 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
5316 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
5317 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
5318 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
5319 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
5322 +#ifndef VCHIQ_CFG_H
5323 +#define VCHIQ_CFG_H
5325 +#define VCHIQ_MAGIC VCHIQ_MAKE_FOURCC('V', 'C', 'H', 'I')
5326 +/* The version of VCHIQ - change with any non-trivial change */
5327 +#define VCHIQ_VERSION 6
5328 +/* The minimum compatible version - update to match VCHIQ_VERSION with any
5329 +** incompatible change */
5330 +#define VCHIQ_VERSION_MIN 3
5332 +#define VCHIQ_MAX_STATES 1
5333 +#define VCHIQ_MAX_SERVICES 4096
5334 +#define VCHIQ_MAX_SLOTS 128
5335 +#define VCHIQ_MAX_SLOTS_PER_SIDE 64
5337 +#define VCHIQ_NUM_CURRENT_BULKS 32
5338 +#define VCHIQ_NUM_SERVICE_BULKS 4
5340 +#ifndef VCHIQ_ENABLE_DEBUG
5341 +#define VCHIQ_ENABLE_DEBUG 1
5344 +#ifndef VCHIQ_ENABLE_STATS
5345 +#define VCHIQ_ENABLE_STATS 1
5348 +#endif /* VCHIQ_CFG_H */
5350 +++ b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_connected.c
5353 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
5355 + * Redistribution and use in source and binary forms, with or without
5356 + * modification, are permitted provided that the following conditions
5358 + * 1. Redistributions of source code must retain the above copyright
5359 + * notice, this list of conditions, and the following disclaimer,
5360 + * without modification.
5361 + * 2. Redistributions in binary form must reproduce the above copyright
5362 + * notice, this list of conditions and the following disclaimer in the
5363 + * documentation and/or other materials provided with the distribution.
5364 + * 3. The names of the above-listed copyright holders may not be used
5365 + * to endorse or promote products derived from this software without
5366 + * specific prior written permission.
5368 + * ALTERNATIVELY, this software may be distributed under the terms of the
5369 + * GNU General Public License ("GPL") version 2, as published by the Free
5370 + * Software Foundation.
5372 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
5373 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
5374 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
5375 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
5376 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
5377 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
5378 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
5379 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
5380 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
5381 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
5382 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
5385 +#include "vchiq_connected.h"
5386 +#include "vchiq_core.h"
5387 +#include <linux/module.h>
5388 +#include <linux/mutex.h>
5390 +#define MAX_CALLBACKS 10
5392 +static int g_connected;
5393 +static int g_num_deferred_callbacks;
5394 +static VCHIQ_CONNECTED_CALLBACK_T g_deferred_callback[MAX_CALLBACKS];
5395 +static int g_once_init;
5396 +static struct mutex g_connected_mutex;
5398 +/****************************************************************************
5400 +* Function to initialize our lock.
5402 +***************************************************************************/
5404 +static void connected_init(void)
5406 + if (!g_once_init) {
5407 + mutex_init(&g_connected_mutex);
5412 +/****************************************************************************
5414 +* This function is used to defer initialization until the vchiq stack is
5415 +* initialized. If the stack is already initialized, then the callback will
5416 +* be made immediately, otherwise it will be deferred until
5417 +* vchiq_call_connected_callbacks is called.
5419 +***************************************************************************/
5421 +void vchiq_add_connected_callback(VCHIQ_CONNECTED_CALLBACK_T callback)
5425 + if (mutex_lock_interruptible(&g_connected_mutex) != 0)
5429 + /* We're already connected. Call the callback immediately. */
5433 + if (g_num_deferred_callbacks >= MAX_CALLBACKS)
5434 + vchiq_log_error(vchiq_core_log_level,
5435 + "There already %d callback registered - "
5436 + "please increase MAX_CALLBACKS",
5437 + g_num_deferred_callbacks);
5439 + g_deferred_callback[g_num_deferred_callbacks] =
5441 + g_num_deferred_callbacks++;
5444 + mutex_unlock(&g_connected_mutex);
5447 +/****************************************************************************
5449 +* This function is called by the vchiq stack once it has been connected to
5450 +* the videocore and clients can start to use the stack.
5452 +***************************************************************************/
5454 +void vchiq_call_connected_callbacks(void)
5460 + if (mutex_lock_interruptible(&g_connected_mutex) != 0)
5463 + for (i = 0; i < g_num_deferred_callbacks; i++)
5464 + g_deferred_callback[i]();
5466 + g_num_deferred_callbacks = 0;
5468 + mutex_unlock(&g_connected_mutex);
5470 +EXPORT_SYMBOL(vchiq_add_connected_callback);
5472 +++ b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_connected.h
5475 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
5477 + * Redistribution and use in source and binary forms, with or without
5478 + * modification, are permitted provided that the following conditions
5480 + * 1. Redistributions of source code must retain the above copyright
5481 + * notice, this list of conditions, and the following disclaimer,
5482 + * without modification.
5483 + * 2. Redistributions in binary form must reproduce the above copyright
5484 + * notice, this list of conditions and the following disclaimer in the
5485 + * documentation and/or other materials provided with the distribution.
5486 + * 3. The names of the above-listed copyright holders may not be used
5487 + * to endorse or promote products derived from this software without
5488 + * specific prior written permission.
5490 + * ALTERNATIVELY, this software may be distributed under the terms of the
5491 + * GNU General Public License ("GPL") version 2, as published by the Free
5492 + * Software Foundation.
5494 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
5495 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
5496 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
5497 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
5498 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
5499 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
5500 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
5501 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
5502 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
5503 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
5504 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
5507 +#ifndef VCHIQ_CONNECTED_H
5508 +#define VCHIQ_CONNECTED_H
5510 +/* ---- Include Files ----------------------------------------------------- */
5512 +/* ---- Constants and Types ---------------------------------------------- */
5514 +typedef void (*VCHIQ_CONNECTED_CALLBACK_T)(void);
5516 +/* ---- Variable Externs ------------------------------------------------- */
5518 +/* ---- Function Prototypes ---------------------------------------------- */
5520 +void vchiq_add_connected_callback(VCHIQ_CONNECTED_CALLBACK_T callback);
5521 +void vchiq_call_connected_callbacks(void);
5523 +#endif /* VCHIQ_CONNECTED_H */
5525 +++ b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_core.c
5528 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
5530 + * Redistribution and use in source and binary forms, with or without
5531 + * modification, are permitted provided that the following conditions
5533 + * 1. Redistributions of source code must retain the above copyright
5534 + * notice, this list of conditions, and the following disclaimer,
5535 + * without modification.
5536 + * 2. Redistributions in binary form must reproduce the above copyright
5537 + * notice, this list of conditions and the following disclaimer in the
5538 + * documentation and/or other materials provided with the distribution.
5539 + * 3. The names of the above-listed copyright holders may not be used
5540 + * to endorse or promote products derived from this software without
5541 + * specific prior written permission.
5543 + * ALTERNATIVELY, this software may be distributed under the terms of the
5544 + * GNU General Public License ("GPL") version 2, as published by the Free
5545 + * Software Foundation.
5547 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
5548 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
5549 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
5550 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
5551 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
5552 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
5553 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
5554 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
5555 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
5556 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
5557 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
5560 +#include "vchiq_core.h"
5562 +#define VCHIQ_SLOT_HANDLER_STACK 8192
5564 +#define HANDLE_STATE_SHIFT 12
5566 +#define SLOT_INFO_FROM_INDEX(state, index) (state->slot_info + (index))
5567 +#define SLOT_DATA_FROM_INDEX(state, index) (state->slot_data + (index))
5568 +#define SLOT_INDEX_FROM_DATA(state, data) \
5569 + (((unsigned int)((char *)data - (char *)state->slot_data)) / \
5571 +#define SLOT_INDEX_FROM_INFO(state, info) \
5572 + ((unsigned int)(info - state->slot_info))
5573 +#define SLOT_QUEUE_INDEX_FROM_POS(pos) \
5574 + ((int)((unsigned int)(pos) / VCHIQ_SLOT_SIZE))
5577 +#define BULK_INDEX(x) (x & (VCHIQ_NUM_SERVICE_BULKS - 1))
5580 +struct vchiq_open_payload {
5584 + short version_min;
5587 +struct vchiq_openack_payload {
5591 +/* we require this for consistency between endpoints */
5592 +vchiq_static_assert(sizeof(VCHIQ_HEADER_T) == 8);
5593 +vchiq_static_assert(IS_POW2(sizeof(VCHIQ_HEADER_T)));
5594 +vchiq_static_assert(IS_POW2(VCHIQ_NUM_CURRENT_BULKS));
5595 +vchiq_static_assert(IS_POW2(VCHIQ_NUM_SERVICE_BULKS));
5596 +vchiq_static_assert(IS_POW2(VCHIQ_MAX_SERVICES));
5597 +vchiq_static_assert(VCHIQ_VERSION >= VCHIQ_VERSION_MIN);
5599 +/* Run time control of log level, based on KERN_XXX level. */
5600 +int vchiq_core_log_level = VCHIQ_LOG_DEFAULT;
5601 +int vchiq_core_msg_log_level = VCHIQ_LOG_DEFAULT;
5602 +int vchiq_sync_log_level = VCHIQ_LOG_DEFAULT;
5604 +static atomic_t pause_bulks_count = ATOMIC_INIT(0);
5606 +static DEFINE_SPINLOCK(service_spinlock);
5607 +DEFINE_SPINLOCK(bulk_waiter_spinlock);
5608 +DEFINE_SPINLOCK(quota_spinlock);
5610 +VCHIQ_STATE_T *vchiq_states[VCHIQ_MAX_STATES];
5611 +static unsigned int handle_seq;
5613 +static const char *const srvstate_names[] = {
5626 +static const char *const reason_names[] = {
5629 + "MESSAGE_AVAILABLE",
5630 + "BULK_TRANSMIT_DONE",
5631 + "BULK_RECEIVE_DONE",
5632 + "BULK_TRANSMIT_ABORTED",
5633 + "BULK_RECEIVE_ABORTED"
5636 +static const char *const conn_state_names[] = {
5650 +release_message_sync(VCHIQ_STATE_T *state, VCHIQ_HEADER_T *header);
5652 +static const char *msg_type_str(unsigned int msg_type)
5654 + switch (msg_type) {
5655 + case VCHIQ_MSG_PADDING: return "PADDING";
5656 + case VCHIQ_MSG_CONNECT: return "CONNECT";
5657 + case VCHIQ_MSG_OPEN: return "OPEN";
5658 + case VCHIQ_MSG_OPENACK: return "OPENACK";
5659 + case VCHIQ_MSG_CLOSE: return "CLOSE";
5660 + case VCHIQ_MSG_DATA: return "DATA";
5661 + case VCHIQ_MSG_BULK_RX: return "BULK_RX";
5662 + case VCHIQ_MSG_BULK_TX: return "BULK_TX";
5663 + case VCHIQ_MSG_BULK_RX_DONE: return "BULK_RX_DONE";
5664 + case VCHIQ_MSG_BULK_TX_DONE: return "BULK_TX_DONE";
5665 + case VCHIQ_MSG_PAUSE: return "PAUSE";
5666 + case VCHIQ_MSG_RESUME: return "RESUME";
5667 + case VCHIQ_MSG_REMOTE_USE: return "REMOTE_USE";
5668 + case VCHIQ_MSG_REMOTE_RELEASE: return "REMOTE_RELEASE";
5669 + case VCHIQ_MSG_REMOTE_USE_ACTIVE: return "REMOTE_USE_ACTIVE";
5675 +vchiq_set_service_state(VCHIQ_SERVICE_T *service, int newstate)
5677 + vchiq_log_info(vchiq_core_log_level, "%d: srv:%d %s->%s",
5678 + service->state->id, service->localport,
5679 + srvstate_names[service->srvstate],
5680 + srvstate_names[newstate]);
5681 + service->srvstate = newstate;
5685 +find_service_by_handle(VCHIQ_SERVICE_HANDLE_T handle)
5687 + VCHIQ_SERVICE_T *service;
5689 + spin_lock(&service_spinlock);
5690 + service = handle_to_service(handle);
5691 + if (service && (service->srvstate != VCHIQ_SRVSTATE_FREE) &&
5692 + (service->handle == handle)) {
5693 + BUG_ON(service->ref_count == 0);
5694 + service->ref_count++;
5697 + spin_unlock(&service_spinlock);
5700 + vchiq_log_info(vchiq_core_log_level,
5701 + "Invalid service handle 0x%x", handle);
5707 +find_service_by_port(VCHIQ_STATE_T *state, int localport)
5709 + VCHIQ_SERVICE_T *service = NULL;
5710 + if ((unsigned int)localport <= VCHIQ_PORT_MAX) {
5711 + spin_lock(&service_spinlock);
5712 + service = state->services[localport];
5713 + if (service && (service->srvstate != VCHIQ_SRVSTATE_FREE)) {
5714 + BUG_ON(service->ref_count == 0);
5715 + service->ref_count++;
5718 + spin_unlock(&service_spinlock);
5722 + vchiq_log_info(vchiq_core_log_level,
5723 + "Invalid port %d", localport);
5729 +find_service_for_instance(VCHIQ_INSTANCE_T instance,
5730 + VCHIQ_SERVICE_HANDLE_T handle) {
5731 + VCHIQ_SERVICE_T *service;
5733 + spin_lock(&service_spinlock);
5734 + service = handle_to_service(handle);
5735 + if (service && (service->srvstate != VCHIQ_SRVSTATE_FREE) &&
5736 + (service->handle == handle) &&
5737 + (service->instance == instance)) {
5738 + BUG_ON(service->ref_count == 0);
5739 + service->ref_count++;
5742 + spin_unlock(&service_spinlock);
5745 + vchiq_log_info(vchiq_core_log_level,
5746 + "Invalid service handle 0x%x", handle);
5752 +next_service_by_instance(VCHIQ_STATE_T *state, VCHIQ_INSTANCE_T instance,
5755 + VCHIQ_SERVICE_T *service = NULL;
5758 + spin_lock(&service_spinlock);
5759 + while (idx < state->unused_service) {
5760 + VCHIQ_SERVICE_T *srv = state->services[idx++];
5761 + if (srv && (srv->srvstate != VCHIQ_SRVSTATE_FREE) &&
5762 + (srv->instance == instance)) {
5764 + BUG_ON(service->ref_count == 0);
5765 + service->ref_count++;
5769 + spin_unlock(&service_spinlock);
5777 +lock_service(VCHIQ_SERVICE_T *service)
5779 + spin_lock(&service_spinlock);
5780 + BUG_ON(!service || (service->ref_count == 0));
5782 + service->ref_count++;
5783 + spin_unlock(&service_spinlock);
5787 +unlock_service(VCHIQ_SERVICE_T *service)
5789 + VCHIQ_STATE_T *state = service->state;
5790 + spin_lock(&service_spinlock);
5791 + BUG_ON(!service || (service->ref_count == 0));
5792 + if (service && service->ref_count) {
5793 + service->ref_count--;
5794 + if (!service->ref_count) {
5795 + BUG_ON(service->srvstate != VCHIQ_SRVSTATE_FREE);
5796 + state->services[service->localport] = NULL;
5800 + spin_unlock(&service_spinlock);
5802 + if (service && service->userdata_term)
5803 + service->userdata_term(service->base.userdata);
5809 +vchiq_get_client_id(VCHIQ_SERVICE_HANDLE_T handle)
5811 + VCHIQ_SERVICE_T *service = find_service_by_handle(handle);
5814 + id = service ? service->client_id : 0;
5816 + unlock_service(service);
5822 +vchiq_get_service_userdata(VCHIQ_SERVICE_HANDLE_T handle)
5824 + VCHIQ_SERVICE_T *service = handle_to_service(handle);
5826 + return service ? service->base.userdata : NULL;
5830 +vchiq_get_service_fourcc(VCHIQ_SERVICE_HANDLE_T handle)
5832 + VCHIQ_SERVICE_T *service = handle_to_service(handle);
5834 + return service ? service->base.fourcc : 0;
5838 +mark_service_closing_internal(VCHIQ_SERVICE_T *service, int sh_thread)
5840 + VCHIQ_STATE_T *state = service->state;
5841 + VCHIQ_SERVICE_QUOTA_T *service_quota;
5843 + service->closing = 1;
5845 + /* Synchronise with other threads. */
5846 + mutex_lock(&state->recycle_mutex);
5847 + mutex_unlock(&state->recycle_mutex);
5848 + if (!sh_thread || (state->conn_state != VCHIQ_CONNSTATE_PAUSE_SENT)) {
5849 + /* If we're pausing then the slot_mutex is held until resume
5850 + * by the slot handler. Therefore don't try to acquire this
5851 + * mutex if we're the slot handler and in the pause sent state.
5852 + * We don't need to in this case anyway. */
5853 + mutex_lock(&state->slot_mutex);
5854 + mutex_unlock(&state->slot_mutex);
5857 + /* Unblock any sending thread. */
5858 + service_quota = &state->service_quotas[service->localport];
5859 + up(&service_quota->quota_event);
5863 +mark_service_closing(VCHIQ_SERVICE_T *service)
5865 + mark_service_closing_internal(service, 0);
5868 +static inline VCHIQ_STATUS_T
5869 +make_service_callback(VCHIQ_SERVICE_T *service, VCHIQ_REASON_T reason,
5870 + VCHIQ_HEADER_T *header, void *bulk_userdata)
5872 + VCHIQ_STATUS_T status;
5873 + vchiq_log_trace(vchiq_core_log_level, "%d: callback:%d (%s, %x, %x)",
5874 + service->state->id, service->localport, reason_names[reason],
5875 + (unsigned int)header, (unsigned int)bulk_userdata);
5876 + status = service->base.callback(reason, header, service->handle,
5878 + if (status == VCHIQ_ERROR) {
5879 + vchiq_log_warning(vchiq_core_log_level,
5880 + "%d: ignoring ERROR from callback to service %x",
5881 + service->state->id, service->handle);
5882 + status = VCHIQ_SUCCESS;
5888 +vchiq_set_conn_state(VCHIQ_STATE_T *state, VCHIQ_CONNSTATE_T newstate)
5890 + VCHIQ_CONNSTATE_T oldstate = state->conn_state;
5891 + vchiq_log_info(vchiq_core_log_level, "%d: %s->%s", state->id,
5892 + conn_state_names[oldstate],
5893 + conn_state_names[newstate]);
5894 + state->conn_state = newstate;
5895 + vchiq_platform_conn_state_changed(state, oldstate, newstate);
5899 +remote_event_create(REMOTE_EVENT_T *event)
5902 + /* Don't clear the 'fired' flag because it may already have been set
5903 + ** by the other side. */
5904 + sema_init(event->event, 0);
5908 +remote_event_destroy(REMOTE_EVENT_T *event)
5914 +remote_event_wait(REMOTE_EVENT_T *event)
5916 + if (!event->fired) {
5919 + if (!event->fired) {
5920 + if (down_interruptible(event->event) != 0) {
5934 +remote_event_signal_local(REMOTE_EVENT_T *event)
5941 +remote_event_poll(REMOTE_EVENT_T *event)
5943 + if (event->fired && event->armed)
5944 + remote_event_signal_local(event);
5948 +remote_event_pollall(VCHIQ_STATE_T *state)
5950 + remote_event_poll(&state->local->sync_trigger);
5951 + remote_event_poll(&state->local->sync_release);
5952 + remote_event_poll(&state->local->trigger);
5953 + remote_event_poll(&state->local->recycle);
5956 +/* Round up message sizes so that any space at the end of a slot is always big
5957 +** enough for a header. This relies on header size being a power of two, which
5958 +** has been verified earlier by a static assertion. */
5960 +static inline unsigned int
5961 +calc_stride(unsigned int size)
5963 + /* Allow room for the header */
5964 + size += sizeof(VCHIQ_HEADER_T);
5967 + return (size + sizeof(VCHIQ_HEADER_T) - 1) & ~(sizeof(VCHIQ_HEADER_T)
5971 +/* Called by the slot handler thread */
5972 +static VCHIQ_SERVICE_T *
5973 +get_listening_service(VCHIQ_STATE_T *state, int fourcc)
5977 + WARN_ON(fourcc == VCHIQ_FOURCC_INVALID);
5979 + for (i = 0; i < state->unused_service; i++) {
5980 + VCHIQ_SERVICE_T *service = state->services[i];
5982 + (service->public_fourcc == fourcc) &&
5983 + ((service->srvstate == VCHIQ_SRVSTATE_LISTENING) ||
5984 + ((service->srvstate == VCHIQ_SRVSTATE_OPEN) &&
5985 + (service->remoteport == VCHIQ_PORT_FREE)))) {
5986 + lock_service(service);
5994 +/* Called by the slot handler thread */
5995 +static VCHIQ_SERVICE_T *
5996 +get_connected_service(VCHIQ_STATE_T *state, unsigned int port)
5999 + for (i = 0; i < state->unused_service; i++) {
6000 + VCHIQ_SERVICE_T *service = state->services[i];
6001 + if (service && (service->srvstate == VCHIQ_SRVSTATE_OPEN)
6002 + && (service->remoteport == port)) {
6003 + lock_service(service);
6011 +request_poll(VCHIQ_STATE_T *state, VCHIQ_SERVICE_T *service, int poll_type)
6017 + value = atomic_read(&service->poll_flags);
6018 + } while (atomic_cmpxchg(&service->poll_flags, value,
6019 + value | (1 << poll_type)) != value);
6022 + value = atomic_read(&state->poll_services[
6023 + service->localport>>5]);
6024 + } while (atomic_cmpxchg(
6025 + &state->poll_services[service->localport>>5],
6026 + value, value | (1 << (service->localport & 0x1f)))
6030 + state->poll_needed = 1;
6033 + /* ... and ensure the slot handler runs. */
6034 + remote_event_signal_local(&state->local->trigger);
6037 +/* Called from queue_message, by the slot handler and application threads,
6038 +** with slot_mutex held */
6039 +static VCHIQ_HEADER_T *
6040 +reserve_space(VCHIQ_STATE_T *state, int space, int is_blocking)
6042 + VCHIQ_SHARED_STATE_T *local = state->local;
6043 + int tx_pos = state->local_tx_pos;
6044 + int slot_space = VCHIQ_SLOT_SIZE - (tx_pos & VCHIQ_SLOT_MASK);
6046 + if (space > slot_space) {
6047 + VCHIQ_HEADER_T *header;
6048 + /* Fill the remaining space with padding */
6049 + WARN_ON(state->tx_data == NULL);
6050 + header = (VCHIQ_HEADER_T *)
6051 + (state->tx_data + (tx_pos & VCHIQ_SLOT_MASK));
6052 + header->msgid = VCHIQ_MSGID_PADDING;
6053 + header->size = slot_space - sizeof(VCHIQ_HEADER_T);
6055 + tx_pos += slot_space;
6058 + /* If necessary, get the next slot. */
6059 + if ((tx_pos & VCHIQ_SLOT_MASK) == 0) {
6062 + /* If there is no free slot... */
6064 + if (down_trylock(&state->slot_available_event) != 0) {
6065 + /* ...wait for one. */
6067 + VCHIQ_STATS_INC(state, slot_stalls);
6069 + /* But first, flush through the last slot. */
6070 + state->local_tx_pos = tx_pos;
6071 + local->tx_pos = tx_pos;
6072 + remote_event_signal(&state->remote->trigger);
6074 + if (!is_blocking ||
6075 + (down_interruptible(
6076 + &state->slot_available_event) != 0))
6077 + return NULL; /* No space available */
6081 + (state->slot_queue_available * VCHIQ_SLOT_SIZE));
6083 + slot_index = local->slot_queue[
6084 + SLOT_QUEUE_INDEX_FROM_POS(tx_pos) &
6085 + VCHIQ_SLOT_QUEUE_MASK];
6087 + (char *)SLOT_DATA_FROM_INDEX(state, slot_index);
6090 + state->local_tx_pos = tx_pos + space;
6092 + return (VCHIQ_HEADER_T *)(state->tx_data + (tx_pos & VCHIQ_SLOT_MASK));
6095 +/* Called by the recycle thread. */
6097 +process_free_queue(VCHIQ_STATE_T *state)
6099 + VCHIQ_SHARED_STATE_T *local = state->local;
6100 + BITSET_T service_found[BITSET_SIZE(VCHIQ_MAX_SERVICES)];
6101 + int slot_queue_available;
6103 + /* Use a read memory barrier to ensure that any state that may have
6104 + ** been modified by another thread is not masked by stale prefetched
6108 + /* Find slots which have been freed by the other side, and return them
6109 + ** to the available queue. */
6110 + slot_queue_available = state->slot_queue_available;
6112 + while (slot_queue_available != local->slot_queue_recycle) {
6114 + int slot_index = local->slot_queue[slot_queue_available++ &
6115 + VCHIQ_SLOT_QUEUE_MASK];
6116 + char *data = (char *)SLOT_DATA_FROM_INDEX(state, slot_index);
6117 + int data_found = 0;
6119 + vchiq_log_trace(vchiq_core_log_level, "%d: pfq %d=%x %x %x",
6120 + state->id, slot_index, (unsigned int)data,
6121 + local->slot_queue_recycle, slot_queue_available);
6123 + /* Initialise the bitmask for services which have used this
6125 + BITSET_ZERO(service_found);
6129 + while (pos < VCHIQ_SLOT_SIZE) {
6130 + VCHIQ_HEADER_T *header =
6131 + (VCHIQ_HEADER_T *)(data + pos);
6132 + int msgid = header->msgid;
6133 + if (VCHIQ_MSG_TYPE(msgid) == VCHIQ_MSG_DATA) {
6134 + int port = VCHIQ_MSG_SRCPORT(msgid);
6135 + VCHIQ_SERVICE_QUOTA_T *service_quota =
6136 + &state->service_quotas[port];
6138 + spin_lock("a_spinlock);
6139 + count = service_quota->message_use_count;
6141 + service_quota->message_use_count =
6143 + spin_unlock("a_spinlock);
6145 + if (count == service_quota->message_quota)
6146 + /* Signal the service that it
6147 + ** has dropped below its quota
6149 + up(&service_quota->quota_event);
6150 + else if (count == 0) {
6151 + vchiq_log_error(vchiq_core_log_level,
6153 + "message_use_count=%d "
6154 + "(header %x, msgid %x, "
6155 + "header->msgid %x, "
6156 + "header->size %x)",
6159 + message_use_count,
6160 + (unsigned int)header, msgid,
6163 + WARN(1, "invalid message use count\n");
6165 + if (!BITSET_IS_SET(service_found, port)) {
6166 + /* Set the found bit for this service */
6167 + BITSET_SET(service_found, port);
6169 + spin_lock("a_spinlock);
6170 + count = service_quota->slot_use_count;
6172 + service_quota->slot_use_count =
6174 + spin_unlock("a_spinlock);
6177 + /* Signal the service in case
6178 + ** it has dropped below its
6180 + up(&service_quota->quota_event);
6182 + vchiq_core_log_level,
6183 + "%d: pfq:%d %x@%x - "
6187 + (unsigned int)header,
6191 + vchiq_core_log_level,
6200 + (unsigned int)header,
6204 + WARN(1, "bad slot use count\n");
6211 + pos += calc_stride(header->size);
6212 + if (pos > VCHIQ_SLOT_SIZE) {
6213 + vchiq_log_error(vchiq_core_log_level,
6214 + "pfq - pos %x: header %x, msgid %x, "
6215 + "header->msgid %x, header->size %x",
6216 + pos, (unsigned int)header, msgid,
6217 + header->msgid, header->size);
6218 + WARN(1, "invalid slot position\n");
6224 + spin_lock("a_spinlock);
6225 + count = state->data_use_count;
6227 + state->data_use_count =
6229 + spin_unlock("a_spinlock);
6230 + if (count == state->data_quota)
6231 + up(&state->data_quota_event);
6234 + state->slot_queue_available = slot_queue_available;
6235 + up(&state->slot_available_event);
6239 +/* Called by the slot handler and application threads */
6240 +static VCHIQ_STATUS_T
6241 +queue_message(VCHIQ_STATE_T *state, VCHIQ_SERVICE_T *service,
6242 + int msgid, const VCHIQ_ELEMENT_T *elements,
6243 + int count, int size, int is_blocking)
6245 + VCHIQ_SHARED_STATE_T *local;
6246 + VCHIQ_SERVICE_QUOTA_T *service_quota = NULL;
6247 + VCHIQ_HEADER_T *header;
6248 + int type = VCHIQ_MSG_TYPE(msgid);
6250 + unsigned int stride;
6252 + local = state->local;
6254 + stride = calc_stride(size);
6256 + WARN_ON(!(stride <= VCHIQ_SLOT_SIZE));
6258 + if ((type != VCHIQ_MSG_RESUME) &&
6259 + (mutex_lock_interruptible(&state->slot_mutex) != 0))
6260 + return VCHIQ_RETRY;
6262 + if (type == VCHIQ_MSG_DATA) {
6267 + if (service->closing) {
6268 + /* The service has been closed */
6269 + mutex_unlock(&state->slot_mutex);
6270 + return VCHIQ_ERROR;
6273 + service_quota = &state->service_quotas[service->localport];
6275 + spin_lock("a_spinlock);
6277 + /* Ensure this service doesn't use more than its quota of
6278 + ** messages or slots */
6279 + tx_end_index = SLOT_QUEUE_INDEX_FROM_POS(
6280 + state->local_tx_pos + stride - 1);
6282 + /* Ensure data messages don't use more than their quota of
6284 + while ((tx_end_index != state->previous_data_index) &&
6285 + (state->data_use_count == state->data_quota)) {
6286 + VCHIQ_STATS_INC(state, data_stalls);
6287 + spin_unlock("a_spinlock);
6288 + mutex_unlock(&state->slot_mutex);
6290 + if (down_interruptible(&state->data_quota_event)
6292 + return VCHIQ_RETRY;
6294 + mutex_lock(&state->slot_mutex);
6295 + spin_lock("a_spinlock);
6296 + tx_end_index = SLOT_QUEUE_INDEX_FROM_POS(
6297 + state->local_tx_pos + stride - 1);
6298 + if ((tx_end_index == state->previous_data_index) ||
6299 + (state->data_use_count < state->data_quota)) {
6300 + /* Pass the signal on to other waiters */
6301 + up(&state->data_quota_event);
6306 + while ((service_quota->message_use_count ==
6307 + service_quota->message_quota) ||
6308 + ((tx_end_index != service_quota->previous_tx_index) &&
6309 + (service_quota->slot_use_count ==
6310 + service_quota->slot_quota))) {
6311 + spin_unlock("a_spinlock);
6312 + vchiq_log_trace(vchiq_core_log_level,
6313 + "%d: qm:%d %s,%x - quota stall "
6314 + "(msg %d, slot %d)",
6315 + state->id, service->localport,
6316 + msg_type_str(type), size,
6317 + service_quota->message_use_count,
6318 + service_quota->slot_use_count);
6319 + VCHIQ_SERVICE_STATS_INC(service, quota_stalls);
6320 + mutex_unlock(&state->slot_mutex);
6321 + if (down_interruptible(&service_quota->quota_event)
6323 + return VCHIQ_RETRY;
6324 + if (service->closing)
6325 + return VCHIQ_ERROR;
6326 + if (mutex_lock_interruptible(&state->slot_mutex) != 0)
6327 + return VCHIQ_RETRY;
6328 + if (service->srvstate != VCHIQ_SRVSTATE_OPEN) {
6329 + /* The service has been closed */
6330 + mutex_unlock(&state->slot_mutex);
6331 + return VCHIQ_ERROR;
6333 + spin_lock("a_spinlock);
6334 + tx_end_index = SLOT_QUEUE_INDEX_FROM_POS(
6335 + state->local_tx_pos + stride - 1);
6338 + spin_unlock("a_spinlock);
6341 + header = reserve_space(state, stride, is_blocking);
6345 + VCHIQ_SERVICE_STATS_INC(service, slot_stalls);
6346 + mutex_unlock(&state->slot_mutex);
6347 + return VCHIQ_RETRY;
6350 + if (type == VCHIQ_MSG_DATA) {
6353 + int slot_use_count;
6355 + vchiq_log_info(vchiq_core_log_level,
6356 + "%d: qm %s@%x,%x (%d->%d)",
6358 + msg_type_str(VCHIQ_MSG_TYPE(msgid)),
6359 + (unsigned int)header, size,
6360 + VCHIQ_MSG_SRCPORT(msgid),
6361 + VCHIQ_MSG_DSTPORT(msgid));
6365 + for (i = 0, pos = 0; i < (unsigned int)count;
6366 + pos += elements[i++].size)
6367 + if (elements[i].size) {
6368 + if (vchiq_copy_from_user
6369 + (header->data + pos, elements[i].data,
6370 + (size_t) elements[i].size) !=
6372 + mutex_unlock(&state->slot_mutex);
6373 + VCHIQ_SERVICE_STATS_INC(service,
6375 + return VCHIQ_ERROR;
6378 + if (vchiq_core_msg_log_level >=
6380 + vchiq_log_dump_mem("Sent", 0,
6381 + header->data + pos,
6383 + elements[0].size));
6387 + spin_lock("a_spinlock);
6388 + service_quota->message_use_count++;
6391 + SLOT_QUEUE_INDEX_FROM_POS(state->local_tx_pos - 1);
6393 + /* If this transmission can't fit in the last slot used by any
6394 + ** service, the data_use_count must be increased. */
6395 + if (tx_end_index != state->previous_data_index) {
6396 + state->previous_data_index = tx_end_index;
6397 + state->data_use_count++;
6400 + /* If this isn't the same slot last used by this service,
6401 + ** the service's slot_use_count must be increased. */
6402 + if (tx_end_index != service_quota->previous_tx_index) {
6403 + service_quota->previous_tx_index = tx_end_index;
6404 + slot_use_count = ++service_quota->slot_use_count;
6406 + slot_use_count = 0;
6409 + spin_unlock("a_spinlock);
6411 + if (slot_use_count)
6412 + vchiq_log_trace(vchiq_core_log_level,
6413 + "%d: qm:%d %s,%x - slot_use->%d (hdr %p)",
6414 + state->id, service->localport,
6415 + msg_type_str(VCHIQ_MSG_TYPE(msgid)), size,
6416 + slot_use_count, header);
6418 + VCHIQ_SERVICE_STATS_INC(service, ctrl_tx_count);
6419 + VCHIQ_SERVICE_STATS_ADD(service, ctrl_tx_bytes, size);
6421 + vchiq_log_info(vchiq_core_log_level,
6422 + "%d: qm %s@%x,%x (%d->%d)", state->id,
6423 + msg_type_str(VCHIQ_MSG_TYPE(msgid)),
6424 + (unsigned int)header, size,
6425 + VCHIQ_MSG_SRCPORT(msgid),
6426 + VCHIQ_MSG_DSTPORT(msgid));
6428 + WARN_ON(!((count == 1) && (size == elements[0].size)));
6429 + memcpy(header->data, elements[0].data,
6430 + elements[0].size);
6432 + VCHIQ_STATS_INC(state, ctrl_tx_count);
6435 + header->msgid = msgid;
6436 + header->size = size;
6441 + svc_fourcc = service
6442 + ? service->base.fourcc
6443 + : VCHIQ_MAKE_FOURCC('?', '?', '?', '?');
6445 + vchiq_log_info(vchiq_core_msg_log_level,
6446 + "Sent Msg %s(%u) to %c%c%c%c s:%u d:%d len:%d",
6447 + msg_type_str(VCHIQ_MSG_TYPE(msgid)),
6448 + VCHIQ_MSG_TYPE(msgid),
6449 + VCHIQ_FOURCC_AS_4CHARS(svc_fourcc),
6450 + VCHIQ_MSG_SRCPORT(msgid),
6451 + VCHIQ_MSG_DSTPORT(msgid),
6455 + /* Make sure the new header is visible to the peer. */
6458 + /* Make the new tx_pos visible to the peer. */
6459 + local->tx_pos = state->local_tx_pos;
6462 + if (service && (type == VCHIQ_MSG_CLOSE))
6463 + vchiq_set_service_state(service, VCHIQ_SRVSTATE_CLOSESENT);
6465 + if (VCHIQ_MSG_TYPE(msgid) != VCHIQ_MSG_PAUSE)
6466 + mutex_unlock(&state->slot_mutex);
6468 + remote_event_signal(&state->remote->trigger);
6470 + return VCHIQ_SUCCESS;
6473 +/* Called by the slot handler and application threads */
6474 +static VCHIQ_STATUS_T
6475 +queue_message_sync(VCHIQ_STATE_T *state, VCHIQ_SERVICE_T *service,
6476 + int msgid, const VCHIQ_ELEMENT_T *elements,
6477 + int count, int size, int is_blocking)
6479 + VCHIQ_SHARED_STATE_T *local;
6480 + VCHIQ_HEADER_T *header;
6482 + local = state->local;
6484 + if ((VCHIQ_MSG_TYPE(msgid) != VCHIQ_MSG_RESUME) &&
6485 + (mutex_lock_interruptible(&state->sync_mutex) != 0))
6486 + return VCHIQ_RETRY;
6488 + remote_event_wait(&local->sync_release);
6492 + header = (VCHIQ_HEADER_T *)SLOT_DATA_FROM_INDEX(state,
6493 + local->slot_sync);
6496 + int oldmsgid = header->msgid;
6497 + if (oldmsgid != VCHIQ_MSGID_PADDING)
6498 + vchiq_log_error(vchiq_core_log_level,
6499 + "%d: qms - msgid %x, not PADDING",
6500 + state->id, oldmsgid);
6506 + vchiq_log_info(vchiq_sync_log_level,
6507 + "%d: qms %s@%x,%x (%d->%d)", state->id,
6508 + msg_type_str(VCHIQ_MSG_TYPE(msgid)),
6509 + (unsigned int)header, size,
6510 + VCHIQ_MSG_SRCPORT(msgid),
6511 + VCHIQ_MSG_DSTPORT(msgid));
6513 + for (i = 0, pos = 0; i < (unsigned int)count;
6514 + pos += elements[i++].size)
6515 + if (elements[i].size) {
6516 + if (vchiq_copy_from_user
6517 + (header->data + pos, elements[i].data,
6518 + (size_t) elements[i].size) !=
6520 + mutex_unlock(&state->sync_mutex);
6521 + VCHIQ_SERVICE_STATS_INC(service,
6523 + return VCHIQ_ERROR;
6526 + if (vchiq_sync_log_level >=
6528 + vchiq_log_dump_mem("Sent Sync",
6529 + 0, header->data + pos,
6531 + elements[0].size));
6535 + VCHIQ_SERVICE_STATS_INC(service, ctrl_tx_count);
6536 + VCHIQ_SERVICE_STATS_ADD(service, ctrl_tx_bytes, size);
6538 + vchiq_log_info(vchiq_sync_log_level,
6539 + "%d: qms %s@%x,%x (%d->%d)", state->id,
6540 + msg_type_str(VCHIQ_MSG_TYPE(msgid)),
6541 + (unsigned int)header, size,
6542 + VCHIQ_MSG_SRCPORT(msgid),
6543 + VCHIQ_MSG_DSTPORT(msgid));
6545 + WARN_ON(!((count == 1) && (size == elements[0].size)));
6546 + memcpy(header->data, elements[0].data,
6547 + elements[0].size);
6549 + VCHIQ_STATS_INC(state, ctrl_tx_count);
6552 + header->size = size;
6553 + header->msgid = msgid;
6555 + if (vchiq_sync_log_level >= VCHIQ_LOG_TRACE) {
6558 + svc_fourcc = service
6559 + ? service->base.fourcc
6560 + : VCHIQ_MAKE_FOURCC('?', '?', '?', '?');
6562 + vchiq_log_trace(vchiq_sync_log_level,
6563 + "Sent Sync Msg %s(%u) to %c%c%c%c s:%u d:%d len:%d",
6564 + msg_type_str(VCHIQ_MSG_TYPE(msgid)),
6565 + VCHIQ_MSG_TYPE(msgid),
6566 + VCHIQ_FOURCC_AS_4CHARS(svc_fourcc),
6567 + VCHIQ_MSG_SRCPORT(msgid),
6568 + VCHIQ_MSG_DSTPORT(msgid),
6572 + /* Make sure the new header is visible to the peer. */
6575 + remote_event_signal(&state->remote->sync_trigger);
6577 + if (VCHIQ_MSG_TYPE(msgid) != VCHIQ_MSG_PAUSE)
6578 + mutex_unlock(&state->sync_mutex);
6580 + return VCHIQ_SUCCESS;
6584 +claim_slot(VCHIQ_SLOT_INFO_T *slot)
6586 + slot->use_count++;
6590 +release_slot(VCHIQ_STATE_T *state, VCHIQ_SLOT_INFO_T *slot_info,
6591 + VCHIQ_HEADER_T *header, VCHIQ_SERVICE_T *service)
6593 + int release_count;
6595 + mutex_lock(&state->recycle_mutex);
6598 + int msgid = header->msgid;
6599 + if (((msgid & VCHIQ_MSGID_CLAIMED) == 0) ||
6600 + (service && service->closing)) {
6601 + mutex_unlock(&state->recycle_mutex);
6605 + /* Rewrite the message header to prevent a double
6607 + header->msgid = msgid & ~VCHIQ_MSGID_CLAIMED;
6610 + release_count = slot_info->release_count;
6611 + slot_info->release_count = ++release_count;
6613 + if (release_count == slot_info->use_count) {
6614 + int slot_queue_recycle;
6615 + /* Add to the freed queue */
6617 + /* A read barrier is necessary here to prevent speculative
6618 + ** fetches of remote->slot_queue_recycle from overtaking the
6622 + slot_queue_recycle = state->remote->slot_queue_recycle;
6623 + state->remote->slot_queue[slot_queue_recycle &
6624 + VCHIQ_SLOT_QUEUE_MASK] =
6625 + SLOT_INDEX_FROM_INFO(state, slot_info);
6626 + state->remote->slot_queue_recycle = slot_queue_recycle + 1;
6627 + vchiq_log_info(vchiq_core_log_level,
6628 + "%d: release_slot %d - recycle->%x",
6629 + state->id, SLOT_INDEX_FROM_INFO(state, slot_info),
6630 + state->remote->slot_queue_recycle);
6632 + /* A write barrier is necessary, but remote_event_signal
6633 + ** contains one. */
6634 + remote_event_signal(&state->remote->recycle);
6637 + mutex_unlock(&state->recycle_mutex);
6640 +/* Called by the slot handler - don't hold the bulk mutex */
6641 +static VCHIQ_STATUS_T
6642 +notify_bulks(VCHIQ_SERVICE_T *service, VCHIQ_BULK_QUEUE_T *queue,
6645 + VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
6647 + vchiq_log_trace(vchiq_core_log_level,
6648 + "%d: nb:%d %cx - p=%x rn=%x r=%x",
6649 + service->state->id, service->localport,
6650 + (queue == &service->bulk_tx) ? 't' : 'r',
6651 + queue->process, queue->remote_notify, queue->remove);
6653 + if (service->state->is_master) {
6654 + while (queue->remote_notify != queue->process) {
6655 + VCHIQ_BULK_T *bulk =
6656 + &queue->bulks[BULK_INDEX(queue->remote_notify)];
6657 + int msgtype = (bulk->dir == VCHIQ_BULK_TRANSMIT) ?
6658 + VCHIQ_MSG_BULK_RX_DONE : VCHIQ_MSG_BULK_TX_DONE;
6659 + int msgid = VCHIQ_MAKE_MSG(msgtype, service->localport,
6660 + service->remoteport);
6661 + VCHIQ_ELEMENT_T element = { &bulk->actual, 4 };
6662 + /* Only reply to non-dummy bulk requests */
6663 + if (bulk->remote_data) {
6664 + status = queue_message(service->state, NULL,
6665 + msgid, &element, 1, 4, 0);
6666 + if (status != VCHIQ_SUCCESS)
6669 + queue->remote_notify++;
6672 + queue->remote_notify = queue->process;
6675 + if (status == VCHIQ_SUCCESS) {
6676 + while (queue->remove != queue->remote_notify) {
6677 + VCHIQ_BULK_T *bulk =
6678 + &queue->bulks[BULK_INDEX(queue->remove)];
6680 + /* Only generate callbacks for non-dummy bulk
6681 + ** requests, and non-terminated services */
6682 + if (bulk->data && service->instance) {
6683 + if (bulk->actual != VCHIQ_BULK_ACTUAL_ABORTED) {
6684 + if (bulk->dir == VCHIQ_BULK_TRANSMIT) {
6685 + VCHIQ_SERVICE_STATS_INC(service,
6687 + VCHIQ_SERVICE_STATS_ADD(service,
6691 + VCHIQ_SERVICE_STATS_INC(service,
6693 + VCHIQ_SERVICE_STATS_ADD(service,
6698 + VCHIQ_SERVICE_STATS_INC(service,
6699 + bulk_aborted_count);
6701 + if (bulk->mode == VCHIQ_BULK_MODE_BLOCKING) {
6702 + struct bulk_waiter *waiter;
6703 + spin_lock(&bulk_waiter_spinlock);
6704 + waiter = bulk->userdata;
6706 + waiter->actual = bulk->actual;
6707 + up(&waiter->event);
6709 + spin_unlock(&bulk_waiter_spinlock);
6710 + } else if (bulk->mode ==
6711 + VCHIQ_BULK_MODE_CALLBACK) {
6712 + VCHIQ_REASON_T reason = (bulk->dir ==
6713 + VCHIQ_BULK_TRANSMIT) ?
6715 + VCHIQ_BULK_ACTUAL_ABORTED) ?
6716 + VCHIQ_BULK_TRANSMIT_ABORTED :
6717 + VCHIQ_BULK_TRANSMIT_DONE) :
6719 + VCHIQ_BULK_ACTUAL_ABORTED) ?
6720 + VCHIQ_BULK_RECEIVE_ABORTED :
6721 + VCHIQ_BULK_RECEIVE_DONE);
6722 + status = make_service_callback(service,
6723 + reason, NULL, bulk->userdata);
6724 + if (status == VCHIQ_RETRY)
6730 + up(&service->bulk_remove_event);
6733 + status = VCHIQ_SUCCESS;
6736 + if (status == VCHIQ_RETRY)
6737 + request_poll(service->state, service,
6738 + (queue == &service->bulk_tx) ?
6739 + VCHIQ_POLL_TXNOTIFY : VCHIQ_POLL_RXNOTIFY);
6744 +/* Called by the slot handler thread */
6746 +poll_services(VCHIQ_STATE_T *state)
6750 + for (group = 0; group < BITSET_SIZE(state->unused_service); group++) {
6752 + flags = atomic_xchg(&state->poll_services[group], 0);
6753 + for (i = 0; flags; i++) {
6754 + if (flags & (1 << i)) {
6755 + VCHIQ_SERVICE_T *service =
6756 + find_service_by_port(state,
6758 + uint32_t service_flags;
6759 + flags &= ~(1 << i);
6763 + atomic_xchg(&service->poll_flags, 0);
6764 + if (service_flags &
6765 + (1 << VCHIQ_POLL_REMOVE)) {
6766 + vchiq_log_info(vchiq_core_log_level,
6767 + "%d: ps - remove %d<->%d",
6768 + state->id, service->localport,
6769 + service->remoteport);
6771 + /* Make it look like a client, because
6772 + it must be removed and not left in
6773 + the LISTENING state. */
6774 + service->public_fourcc =
6775 + VCHIQ_FOURCC_INVALID;
6777 + if (vchiq_close_service_internal(
6778 + service, 0/*!close_recvd*/) !=
6780 + request_poll(state, service,
6781 + VCHIQ_POLL_REMOVE);
6782 + } else if (service_flags &
6783 + (1 << VCHIQ_POLL_TERMINATE)) {
6784 + vchiq_log_info(vchiq_core_log_level,
6785 + "%d: ps - terminate %d<->%d",
6786 + state->id, service->localport,
6787 + service->remoteport);
6788 + if (vchiq_close_service_internal(
6789 + service, 0/*!close_recvd*/) !=
6791 + request_poll(state, service,
6792 + VCHIQ_POLL_TERMINATE);
6794 + if (service_flags & (1 << VCHIQ_POLL_TXNOTIFY))
6795 + notify_bulks(service,
6796 + &service->bulk_tx,
6798 + if (service_flags & (1 << VCHIQ_POLL_RXNOTIFY))
6799 + notify_bulks(service,
6800 + &service->bulk_rx,
6802 + unlock_service(service);
6808 +/* Called by the slot handler or application threads, holding the bulk mutex. */
6810 +resolve_bulks(VCHIQ_SERVICE_T *service, VCHIQ_BULK_QUEUE_T *queue)
6812 + VCHIQ_STATE_T *state = service->state;
6816 + while ((queue->process != queue->local_insert) &&
6817 + (queue->process != queue->remote_insert)) {
6818 + VCHIQ_BULK_T *bulk = &queue->bulks[BULK_INDEX(queue->process)];
6820 + vchiq_log_trace(vchiq_core_log_level,
6821 + "%d: rb:%d %cx - li=%x ri=%x p=%x",
6822 + state->id, service->localport,
6823 + (queue == &service->bulk_tx) ? 't' : 'r',
6824 + queue->local_insert, queue->remote_insert,
6827 + WARN_ON(!((int)(queue->local_insert - queue->process) > 0));
6828 + WARN_ON(!((int)(queue->remote_insert - queue->process) > 0));
6830 + rc = mutex_lock_interruptible(&state->bulk_transfer_mutex);
6834 + vchiq_transfer_bulk(bulk);
6835 + mutex_unlock(&state->bulk_transfer_mutex);
6837 + if (vchiq_core_msg_log_level >= VCHIQ_LOG_INFO) {
6838 + const char *header = (queue == &service->bulk_tx) ?
6839 + "Send Bulk to" : "Recv Bulk from";
6840 + if (bulk->actual != VCHIQ_BULK_ACTUAL_ABORTED)
6841 + vchiq_log_info(vchiq_core_msg_log_level,
6842 + "%s %c%c%c%c d:%d len:%d %x<->%x",
6844 + VCHIQ_FOURCC_AS_4CHARS(
6845 + service->base.fourcc),
6846 + service->remoteport,
6848 + (unsigned int)bulk->data,
6849 + (unsigned int)bulk->remote_data);
6851 + vchiq_log_info(vchiq_core_msg_log_level,
6852 + "%s %c%c%c%c d:%d ABORTED - tx len:%d,"
6853 + " rx len:%d %x<->%x",
6855 + VCHIQ_FOURCC_AS_4CHARS(
6856 + service->base.fourcc),
6857 + service->remoteport,
6859 + bulk->remote_size,
6860 + (unsigned int)bulk->data,
6861 + (unsigned int)bulk->remote_data);
6864 + vchiq_complete_bulk(bulk);
6871 +/* Called with the bulk_mutex held */
6873 +abort_outstanding_bulks(VCHIQ_SERVICE_T *service, VCHIQ_BULK_QUEUE_T *queue)
6875 + int is_tx = (queue == &service->bulk_tx);
6876 + vchiq_log_trace(vchiq_core_log_level,
6877 + "%d: aob:%d %cx - li=%x ri=%x p=%x",
6878 + service->state->id, service->localport, is_tx ? 't' : 'r',
6879 + queue->local_insert, queue->remote_insert, queue->process);
6881 + WARN_ON(!((int)(queue->local_insert - queue->process) >= 0));
6882 + WARN_ON(!((int)(queue->remote_insert - queue->process) >= 0));
6884 + while ((queue->process != queue->local_insert) ||
6885 + (queue->process != queue->remote_insert)) {
6886 + VCHIQ_BULK_T *bulk = &queue->bulks[BULK_INDEX(queue->process)];
6888 + if (queue->process == queue->remote_insert) {
6889 + /* fabricate a matching dummy bulk */
6890 + bulk->remote_data = NULL;
6891 + bulk->remote_size = 0;
6892 + queue->remote_insert++;
6895 + if (queue->process != queue->local_insert) {
6896 + vchiq_complete_bulk(bulk);
6898 + vchiq_log_info(vchiq_core_msg_log_level,
6899 + "%s %c%c%c%c d:%d ABORTED - tx len:%d, "
6901 + is_tx ? "Send Bulk to" : "Recv Bulk from",
6902 + VCHIQ_FOURCC_AS_4CHARS(service->base.fourcc),
6903 + service->remoteport,
6905 + bulk->remote_size);
6907 + /* fabricate a matching dummy bulk */
6908 + bulk->data = NULL;
6910 + bulk->actual = VCHIQ_BULK_ACTUAL_ABORTED;
6911 + bulk->dir = is_tx ? VCHIQ_BULK_TRANSMIT :
6912 + VCHIQ_BULK_RECEIVE;
6913 + queue->local_insert++;
6920 +/* Called from the slot handler thread */
6922 +pause_bulks(VCHIQ_STATE_T *state)
6924 + if (unlikely(atomic_inc_return(&pause_bulks_count) != 1)) {
6926 + atomic_set(&pause_bulks_count, 1);
6930 + /* Block bulk transfers from all services */
6931 + mutex_lock(&state->bulk_transfer_mutex);
6934 +/* Called from the slot handler thread */
6936 +resume_bulks(VCHIQ_STATE_T *state)
6939 + if (unlikely(atomic_dec_return(&pause_bulks_count) != 0)) {
6941 + atomic_set(&pause_bulks_count, 0);
6945 + /* Allow bulk transfers from all services */
6946 + mutex_unlock(&state->bulk_transfer_mutex);
6948 + if (state->deferred_bulks == 0)
6951 + /* Deal with any bulks which had to be deferred due to being in
6952 + * paused state. Don't try to match up to number of deferred bulks
6953 + * in case we've had something come and close the service in the
6954 + * interim - just process all bulk queues for all services */
6955 + vchiq_log_info(vchiq_core_log_level, "%s: processing %d deferred bulks",
6956 + __func__, state->deferred_bulks);
6958 + for (i = 0; i < state->unused_service; i++) {
6959 + VCHIQ_SERVICE_T *service = state->services[i];
6960 + int resolved_rx = 0;
6961 + int resolved_tx = 0;
6962 + if (!service || (service->srvstate != VCHIQ_SRVSTATE_OPEN))
6965 + mutex_lock(&service->bulk_mutex);
6966 + resolved_rx = resolve_bulks(service, &service->bulk_rx);
6967 + resolved_tx = resolve_bulks(service, &service->bulk_tx);
6968 + mutex_unlock(&service->bulk_mutex);
6970 + notify_bulks(service, &service->bulk_rx, 1);
6972 + notify_bulks(service, &service->bulk_tx, 1);
6974 + state->deferred_bulks = 0;
6978 +parse_open(VCHIQ_STATE_T *state, VCHIQ_HEADER_T *header)
6980 + VCHIQ_SERVICE_T *service = NULL;
6983 + unsigned int localport, remoteport;
6985 + msgid = header->msgid;
6986 + size = header->size;
6987 + type = VCHIQ_MSG_TYPE(msgid);
6988 + localport = VCHIQ_MSG_DSTPORT(msgid);
6989 + remoteport = VCHIQ_MSG_SRCPORT(msgid);
6990 + if (size >= sizeof(struct vchiq_open_payload)) {
6991 + const struct vchiq_open_payload *payload =
6992 + (struct vchiq_open_payload *)header->data;
6993 + unsigned int fourcc;
6995 + fourcc = payload->fourcc;
6996 + vchiq_log_info(vchiq_core_log_level,
6997 + "%d: prs OPEN@%x (%d->'%c%c%c%c')",
6998 + state->id, (unsigned int)header,
7000 + VCHIQ_FOURCC_AS_4CHARS(fourcc));
7002 + service = get_listening_service(state, fourcc);
7005 + /* A matching service exists */
7006 + short version = payload->version;
7007 + short version_min = payload->version_min;
7008 + if ((service->version < version_min) ||
7009 + (version < service->version_min)) {
7010 + /* Version mismatch */
7011 + vchiq_loud_error_header();
7012 + vchiq_loud_error("%d: service %d (%c%c%c%c) "
7013 + "version mismatch - local (%d, min %d)"
7014 + " vs. remote (%d, min %d)",
7015 + state->id, service->localport,
7016 + VCHIQ_FOURCC_AS_4CHARS(fourcc),
7017 + service->version, service->version_min,
7018 + version, version_min);
7019 + vchiq_loud_error_footer();
7020 + unlock_service(service);
7024 + service->peer_version = version;
7026 + if (service->srvstate == VCHIQ_SRVSTATE_LISTENING) {
7027 + struct vchiq_openack_payload ack_payload = {
7030 + VCHIQ_ELEMENT_T body = {
7032 + sizeof(ack_payload)
7035 + /* Acknowledge the OPEN */
7036 + if (service->sync) {
7037 + if (queue_message_sync(state, NULL,
7039 + VCHIQ_MSG_OPENACK,
7040 + service->localport,
7042 + &body, 1, sizeof(ack_payload),
7043 + 0) == VCHIQ_RETRY)
7044 + goto bail_not_ready;
7046 + if (queue_message(state, NULL,
7048 + VCHIQ_MSG_OPENACK,
7049 + service->localport,
7051 + &body, 1, sizeof(ack_payload),
7052 + 0) == VCHIQ_RETRY)
7053 + goto bail_not_ready;
7056 + /* The service is now open */
7057 + vchiq_set_service_state(service,
7058 + service->sync ? VCHIQ_SRVSTATE_OPENSYNC
7059 + : VCHIQ_SRVSTATE_OPEN);
7062 + service->remoteport = remoteport;
7063 + service->client_id = ((int *)header->data)[1];
7064 + if (make_service_callback(service, VCHIQ_SERVICE_OPENED,
7065 + NULL, NULL) == VCHIQ_RETRY) {
7066 + /* Bail out if not ready */
7067 + service->remoteport = VCHIQ_PORT_FREE;
7068 + goto bail_not_ready;
7071 + /* Success - the message has been dealt with */
7072 + unlock_service(service);
7078 + /* No available service, or an invalid request - send a CLOSE */
7079 + if (queue_message(state, NULL,
7080 + VCHIQ_MAKE_MSG(VCHIQ_MSG_CLOSE, 0, VCHIQ_MSG_SRCPORT(msgid)),
7081 + NULL, 0, 0, 0) == VCHIQ_RETRY)
7082 + goto bail_not_ready;
7088 + unlock_service(service);
7093 +/* Called by the slot handler thread */
7095 +parse_rx_slots(VCHIQ_STATE_T *state)
7097 + VCHIQ_SHARED_STATE_T *remote = state->remote;
7098 + VCHIQ_SERVICE_T *service = NULL;
7100 + DEBUG_INITIALISE(state->local)
7102 + tx_pos = remote->tx_pos;
7104 + while (state->rx_pos != tx_pos) {
7105 + VCHIQ_HEADER_T *header;
7108 + unsigned int localport, remoteport;
7110 + DEBUG_TRACE(PARSE_LINE);
7111 + if (!state->rx_data) {
7113 + WARN_ON(!((state->rx_pos & VCHIQ_SLOT_MASK) == 0));
7114 + rx_index = remote->slot_queue[
7115 + SLOT_QUEUE_INDEX_FROM_POS(state->rx_pos) &
7116 + VCHIQ_SLOT_QUEUE_MASK];
7117 + state->rx_data = (char *)SLOT_DATA_FROM_INDEX(state,
7119 + state->rx_info = SLOT_INFO_FROM_INDEX(state, rx_index);
7121 + /* Initialise use_count to one, and increment
7122 + ** release_count at the end of the slot to avoid
7123 + ** releasing the slot prematurely. */
7124 + state->rx_info->use_count = 1;
7125 + state->rx_info->release_count = 0;
7128 + header = (VCHIQ_HEADER_T *)(state->rx_data +
7129 + (state->rx_pos & VCHIQ_SLOT_MASK));
7130 + DEBUG_VALUE(PARSE_HEADER, (int)header);
7131 + msgid = header->msgid;
7132 + DEBUG_VALUE(PARSE_MSGID, msgid);
7133 + size = header->size;
7134 + type = VCHIQ_MSG_TYPE(msgid);
7135 + localport = VCHIQ_MSG_DSTPORT(msgid);
7136 + remoteport = VCHIQ_MSG_SRCPORT(msgid);
7138 + if (type != VCHIQ_MSG_DATA)
7139 + VCHIQ_STATS_INC(state, ctrl_rx_count);
7142 + case VCHIQ_MSG_OPENACK:
7143 + case VCHIQ_MSG_CLOSE:
7144 + case VCHIQ_MSG_DATA:
7145 + case VCHIQ_MSG_BULK_RX:
7146 + case VCHIQ_MSG_BULK_TX:
7147 + case VCHIQ_MSG_BULK_RX_DONE:
7148 + case VCHIQ_MSG_BULK_TX_DONE:
7149 + service = find_service_by_port(state, localport);
7150 + if ((!service || service->remoteport != remoteport) &&
7151 + (localport == 0) &&
7152 + (type == VCHIQ_MSG_CLOSE)) {
7153 + /* This could be a CLOSE from a client which
7154 + hadn't yet received the OPENACK - look for
7155 + the connected service */
7157 + unlock_service(service);
7158 + service = get_connected_service(state,
7161 + vchiq_log_warning(vchiq_core_log_level,
7162 + "%d: prs %s@%x (%d->%d) - "
7163 + "found connected service %d",
7164 + state->id, msg_type_str(type),
7165 + (unsigned int)header,
7166 + remoteport, localport,
7167 + service->localport);
7171 + vchiq_log_error(vchiq_core_log_level,
7172 + "%d: prs %s@%x (%d->%d) - "
7173 + "invalid/closed service %d",
7174 + state->id, msg_type_str(type),
7175 + (unsigned int)header,
7176 + remoteport, localport, localport);
7177 + goto skip_message;
7184 + if (vchiq_core_msg_log_level >= VCHIQ_LOG_INFO) {
7187 + svc_fourcc = service
7188 + ? service->base.fourcc
7189 + : VCHIQ_MAKE_FOURCC('?', '?', '?', '?');
7190 + vchiq_log_info(vchiq_core_msg_log_level,
7191 + "Rcvd Msg %s(%u) from %c%c%c%c s:%d d:%d "
7193 + msg_type_str(type), type,
7194 + VCHIQ_FOURCC_AS_4CHARS(svc_fourcc),
7195 + remoteport, localport, size);
7197 + vchiq_log_dump_mem("Rcvd", 0, header->data,
7201 + if (((unsigned int)header & VCHIQ_SLOT_MASK) + calc_stride(size)
7202 + > VCHIQ_SLOT_SIZE) {
7203 + vchiq_log_error(vchiq_core_log_level,
7204 + "header %x (msgid %x) - size %x too big for "
7206 + (unsigned int)header, (unsigned int)msgid,
7207 + (unsigned int)size);
7208 + WARN(1, "oversized for slot\n");
7212 + case VCHIQ_MSG_OPEN:
7213 + WARN_ON(!(VCHIQ_MSG_DSTPORT(msgid) == 0));
7214 + if (!parse_open(state, header))
7215 + goto bail_not_ready;
7217 + case VCHIQ_MSG_OPENACK:
7218 + if (size >= sizeof(struct vchiq_openack_payload)) {
7219 + const struct vchiq_openack_payload *payload =
7220 + (struct vchiq_openack_payload *)
7222 + service->peer_version = payload->version;
7224 + vchiq_log_info(vchiq_core_log_level,
7225 + "%d: prs OPENACK@%x,%x (%d->%d) v:%d",
7226 + state->id, (unsigned int)header, size,
7227 + remoteport, localport, service->peer_version);
7228 + if (service->srvstate ==
7229 + VCHIQ_SRVSTATE_OPENING) {
7230 + service->remoteport = remoteport;
7231 + vchiq_set_service_state(service,
7232 + VCHIQ_SRVSTATE_OPEN);
7233 + up(&service->remove_event);
7235 + vchiq_log_error(vchiq_core_log_level,
7236 + "OPENACK received in state %s",
7237 + srvstate_names[service->srvstate]);
7239 + case VCHIQ_MSG_CLOSE:
7240 + WARN_ON(size != 0); /* There should be no data */
7242 + vchiq_log_info(vchiq_core_log_level,
7243 + "%d: prs CLOSE@%x (%d->%d)",
7244 + state->id, (unsigned int)header,
7245 + remoteport, localport);
7247 + mark_service_closing_internal(service, 1);
7249 + if (vchiq_close_service_internal(service,
7250 + 1/*close_recvd*/) == VCHIQ_RETRY)
7251 + goto bail_not_ready;
7253 + vchiq_log_info(vchiq_core_log_level,
7254 + "Close Service %c%c%c%c s:%u d:%d",
7255 + VCHIQ_FOURCC_AS_4CHARS(service->base.fourcc),
7256 + service->localport,
7257 + service->remoteport);
7259 + case VCHIQ_MSG_DATA:
7260 + vchiq_log_trace(vchiq_core_log_level,
7261 + "%d: prs DATA@%x,%x (%d->%d)",
7262 + state->id, (unsigned int)header, size,
7263 + remoteport, localport);
7265 + if ((service->remoteport == remoteport)
7266 + && (service->srvstate ==
7267 + VCHIQ_SRVSTATE_OPEN)) {
7268 + header->msgid = msgid | VCHIQ_MSGID_CLAIMED;
7269 + claim_slot(state->rx_info);
7270 + DEBUG_TRACE(PARSE_LINE);
7271 + if (make_service_callback(service,
7272 + VCHIQ_MESSAGE_AVAILABLE, header,
7273 + NULL) == VCHIQ_RETRY) {
7274 + DEBUG_TRACE(PARSE_LINE);
7275 + goto bail_not_ready;
7277 + VCHIQ_SERVICE_STATS_INC(service, ctrl_rx_count);
7278 + VCHIQ_SERVICE_STATS_ADD(service, ctrl_rx_bytes,
7281 + VCHIQ_STATS_INC(state, error_count);
7284 + case VCHIQ_MSG_CONNECT:
7285 + vchiq_log_info(vchiq_core_log_level,
7286 + "%d: prs CONNECT@%x",
7287 + state->id, (unsigned int)header);
7288 + up(&state->connect);
7290 + case VCHIQ_MSG_BULK_RX:
7291 + case VCHIQ_MSG_BULK_TX: {
7292 + VCHIQ_BULK_QUEUE_T *queue;
7293 + WARN_ON(!state->is_master);
7294 + queue = (type == VCHIQ_MSG_BULK_RX) ?
7295 + &service->bulk_tx : &service->bulk_rx;
7296 + if ((service->remoteport == remoteport)
7297 + && (service->srvstate ==
7298 + VCHIQ_SRVSTATE_OPEN)) {
7299 + VCHIQ_BULK_T *bulk;
7302 + DEBUG_TRACE(PARSE_LINE);
7303 + if (mutex_lock_interruptible(
7304 + &service->bulk_mutex) != 0) {
7305 + DEBUG_TRACE(PARSE_LINE);
7306 + goto bail_not_ready;
7309 + WARN_ON(!(queue->remote_insert < queue->remove +
7310 + VCHIQ_NUM_SERVICE_BULKS));
7311 + bulk = &queue->bulks[
7312 + BULK_INDEX(queue->remote_insert)];
7313 + bulk->remote_data =
7314 + (void *)((int *)header->data)[0];
7315 + bulk->remote_size = ((int *)header->data)[1];
7318 + vchiq_log_info(vchiq_core_log_level,
7319 + "%d: prs %s@%x (%d->%d) %x@%x",
7320 + state->id, msg_type_str(type),
7321 + (unsigned int)header,
7322 + remoteport, localport,
7323 + bulk->remote_size,
7324 + (unsigned int)bulk->remote_data);
7326 + queue->remote_insert++;
7328 + if (atomic_read(&pause_bulks_count)) {
7329 + state->deferred_bulks++;
7330 + vchiq_log_info(vchiq_core_log_level,
7331 + "%s: deferring bulk (%d)",
7333 + state->deferred_bulks);
7334 + if (state->conn_state !=
7335 + VCHIQ_CONNSTATE_PAUSE_SENT)
7337 + vchiq_core_log_level,
7338 + "%s: bulks paused in "
7339 + "unexpected state %s",
7342 + state->conn_state]);
7343 + } else if (state->conn_state ==
7344 + VCHIQ_CONNSTATE_CONNECTED) {
7345 + DEBUG_TRACE(PARSE_LINE);
7346 + resolved = resolve_bulks(service,
7350 + mutex_unlock(&service->bulk_mutex);
7352 + notify_bulks(service, queue,
7356 + case VCHIQ_MSG_BULK_RX_DONE:
7357 + case VCHIQ_MSG_BULK_TX_DONE:
7358 + WARN_ON(state->is_master);
7359 + if ((service->remoteport == remoteport)
7360 + && (service->srvstate !=
7361 + VCHIQ_SRVSTATE_FREE)) {
7362 + VCHIQ_BULK_QUEUE_T *queue;
7363 + VCHIQ_BULK_T *bulk;
7365 + queue = (type == VCHIQ_MSG_BULK_RX_DONE) ?
7366 + &service->bulk_rx : &service->bulk_tx;
7368 + DEBUG_TRACE(PARSE_LINE);
7369 + if (mutex_lock_interruptible(
7370 + &service->bulk_mutex) != 0) {
7371 + DEBUG_TRACE(PARSE_LINE);
7372 + goto bail_not_ready;
7374 + if ((int)(queue->remote_insert -
7375 + queue->local_insert) >= 0) {
7376 + vchiq_log_error(vchiq_core_log_level,
7377 + "%d: prs %s@%x (%d->%d) "
7378 + "unexpected (ri=%d,li=%d)",
7379 + state->id, msg_type_str(type),
7380 + (unsigned int)header,
7381 + remoteport, localport,
7382 + queue->remote_insert,
7383 + queue->local_insert);
7384 + mutex_unlock(&service->bulk_mutex);
7388 + BUG_ON(queue->process == queue->local_insert);
7389 + BUG_ON(queue->process != queue->remote_insert);
7391 + bulk = &queue->bulks[
7392 + BULK_INDEX(queue->remote_insert)];
7393 + bulk->actual = *(int *)header->data;
7394 + queue->remote_insert++;
7396 + vchiq_log_info(vchiq_core_log_level,
7397 + "%d: prs %s@%x (%d->%d) %x@%x",
7398 + state->id, msg_type_str(type),
7399 + (unsigned int)header,
7400 + remoteport, localport,
7401 + bulk->actual, (unsigned int)bulk->data);
7403 + vchiq_log_trace(vchiq_core_log_level,
7404 + "%d: prs:%d %cx li=%x ri=%x p=%x",
7405 + state->id, localport,
7406 + (type == VCHIQ_MSG_BULK_RX_DONE) ?
7408 + queue->local_insert,
7409 + queue->remote_insert, queue->process);
7411 + DEBUG_TRACE(PARSE_LINE);
7412 + WARN_ON(queue->process == queue->local_insert);
7413 + vchiq_complete_bulk(bulk);
7415 + mutex_unlock(&service->bulk_mutex);
7416 + DEBUG_TRACE(PARSE_LINE);
7417 + notify_bulks(service, queue, 1/*retry_poll*/);
7418 + DEBUG_TRACE(PARSE_LINE);
7421 + case VCHIQ_MSG_PADDING:
7422 + vchiq_log_trace(vchiq_core_log_level,
7423 + "%d: prs PADDING@%x,%x",
7424 + state->id, (unsigned int)header, size);
7426 + case VCHIQ_MSG_PAUSE:
7427 + /* If initiated, signal the application thread */
7428 + vchiq_log_trace(vchiq_core_log_level,
7429 + "%d: prs PAUSE@%x,%x",
7430 + state->id, (unsigned int)header, size);
7431 + if (state->conn_state == VCHIQ_CONNSTATE_PAUSED) {
7432 + vchiq_log_error(vchiq_core_log_level,
7433 + "%d: PAUSE received in state PAUSED",
7437 + if (state->conn_state != VCHIQ_CONNSTATE_PAUSE_SENT) {
7438 + /* Send a PAUSE in response */
7439 + if (queue_message(state, NULL,
7440 + VCHIQ_MAKE_MSG(VCHIQ_MSG_PAUSE, 0, 0),
7441 + NULL, 0, 0, 0) == VCHIQ_RETRY)
7442 + goto bail_not_ready;
7443 + if (state->is_master)
7444 + pause_bulks(state);
7446 + /* At this point slot_mutex is held */
7447 + vchiq_set_conn_state(state, VCHIQ_CONNSTATE_PAUSED);
7448 + vchiq_platform_paused(state);
7450 + case VCHIQ_MSG_RESUME:
7451 + vchiq_log_trace(vchiq_core_log_level,
7452 + "%d: prs RESUME@%x,%x",
7453 + state->id, (unsigned int)header, size);
7454 + /* Release the slot mutex */
7455 + mutex_unlock(&state->slot_mutex);
7456 + if (state->is_master)
7457 + resume_bulks(state);
7458 + vchiq_set_conn_state(state, VCHIQ_CONNSTATE_CONNECTED);
7459 + vchiq_platform_resumed(state);
7462 + case VCHIQ_MSG_REMOTE_USE:
7463 + vchiq_on_remote_use(state);
7465 + case VCHIQ_MSG_REMOTE_RELEASE:
7466 + vchiq_on_remote_release(state);
7468 + case VCHIQ_MSG_REMOTE_USE_ACTIVE:
7469 + vchiq_on_remote_use_active(state);
7473 + vchiq_log_error(vchiq_core_log_level,
7474 + "%d: prs invalid msgid %x@%x,%x",
7475 + state->id, msgid, (unsigned int)header, size);
7476 + WARN(1, "invalid message\n");
7482 + unlock_service(service);
7486 + state->rx_pos += calc_stride(size);
7488 + DEBUG_TRACE(PARSE_LINE);
7489 + /* Perform some housekeeping when the end of the slot is
7491 + if ((state->rx_pos & VCHIQ_SLOT_MASK) == 0) {
7492 + /* Remove the extra reference count. */
7493 + release_slot(state, state->rx_info, NULL, NULL);
7494 + state->rx_data = NULL;
7500 + unlock_service(service);
7503 +/* Called by the slot handler thread */
7505 +slot_handler_func(void *v)
7507 + VCHIQ_STATE_T *state = (VCHIQ_STATE_T *) v;
7508 + VCHIQ_SHARED_STATE_T *local = state->local;
7509 + DEBUG_INITIALISE(local)
7512 + DEBUG_COUNT(SLOT_HANDLER_COUNT);
7513 + DEBUG_TRACE(SLOT_HANDLER_LINE);
7514 + remote_event_wait(&local->trigger);
7518 + DEBUG_TRACE(SLOT_HANDLER_LINE);
7519 + if (state->poll_needed) {
7520 + /* Check if we need to suspend - may change our
7522 + vchiq_platform_check_suspend(state);
7524 + state->poll_needed = 0;
7526 + /* Handle service polling and other rare conditions here
7527 + ** out of the mainline code */
7528 + switch (state->conn_state) {
7529 + case VCHIQ_CONNSTATE_CONNECTED:
7530 + /* Poll the services as requested */
7531 + poll_services(state);
7534 + case VCHIQ_CONNSTATE_PAUSING:
7535 + if (state->is_master)
7536 + pause_bulks(state);
7537 + if (queue_message(state, NULL,
7538 + VCHIQ_MAKE_MSG(VCHIQ_MSG_PAUSE, 0, 0),
7539 + NULL, 0, 0, 0) != VCHIQ_RETRY) {
7540 + vchiq_set_conn_state(state,
7541 + VCHIQ_CONNSTATE_PAUSE_SENT);
7543 + if (state->is_master)
7544 + resume_bulks(state);
7546 + state->poll_needed = 1;
7550 + case VCHIQ_CONNSTATE_PAUSED:
7551 + vchiq_platform_resume(state);
7554 + case VCHIQ_CONNSTATE_RESUMING:
7555 + if (queue_message(state, NULL,
7556 + VCHIQ_MAKE_MSG(VCHIQ_MSG_RESUME, 0, 0),
7557 + NULL, 0, 0, 0) != VCHIQ_RETRY) {
7558 + if (state->is_master)
7559 + resume_bulks(state);
7560 + vchiq_set_conn_state(state,
7561 + VCHIQ_CONNSTATE_CONNECTED);
7562 + vchiq_platform_resumed(state);
7564 + /* This should really be impossible,
7565 + ** since the PAUSE should have flushed
7566 + ** through outstanding messages. */
7567 + vchiq_log_error(vchiq_core_log_level,
7568 + "Failed to send RESUME "
7574 + case VCHIQ_CONNSTATE_PAUSE_TIMEOUT:
7575 + case VCHIQ_CONNSTATE_RESUME_TIMEOUT:
7576 + vchiq_platform_handle_timeout(state);
7585 + DEBUG_TRACE(SLOT_HANDLER_LINE);
7586 + parse_rx_slots(state);
7592 +/* Called by the recycle thread */
7594 +recycle_func(void *v)
7596 + VCHIQ_STATE_T *state = (VCHIQ_STATE_T *) v;
7597 + VCHIQ_SHARED_STATE_T *local = state->local;
7600 + remote_event_wait(&local->recycle);
7602 + process_free_queue(state);
7608 +/* Called by the sync thread */
7612 + VCHIQ_STATE_T *state = (VCHIQ_STATE_T *) v;
7613 + VCHIQ_SHARED_STATE_T *local = state->local;
7614 + VCHIQ_HEADER_T *header = (VCHIQ_HEADER_T *)SLOT_DATA_FROM_INDEX(state,
7615 + state->remote->slot_sync);
7618 + VCHIQ_SERVICE_T *service;
7621 + unsigned int localport, remoteport;
7623 + remote_event_wait(&local->sync_trigger);
7627 + msgid = header->msgid;
7628 + size = header->size;
7629 + type = VCHIQ_MSG_TYPE(msgid);
7630 + localport = VCHIQ_MSG_DSTPORT(msgid);
7631 + remoteport = VCHIQ_MSG_SRCPORT(msgid);
7633 + service = find_service_by_port(state, localport);
7636 + vchiq_log_error(vchiq_sync_log_level,
7637 + "%d: sf %s@%x (%d->%d) - "
7638 + "invalid/closed service %d",
7639 + state->id, msg_type_str(type),
7640 + (unsigned int)header,
7641 + remoteport, localport, localport);
7642 + release_message_sync(state, header);
7646 + if (vchiq_sync_log_level >= VCHIQ_LOG_TRACE) {
7649 + svc_fourcc = service
7650 + ? service->base.fourcc
7651 + : VCHIQ_MAKE_FOURCC('?', '?', '?', '?');
7652 + vchiq_log_trace(vchiq_sync_log_level,
7653 + "Rcvd Msg %s from %c%c%c%c s:%d d:%d len:%d",
7654 + msg_type_str(type),
7655 + VCHIQ_FOURCC_AS_4CHARS(svc_fourcc),
7656 + remoteport, localport, size);
7658 + vchiq_log_dump_mem("Rcvd", 0, header->data,
7663 + case VCHIQ_MSG_OPENACK:
7664 + if (size >= sizeof(struct vchiq_openack_payload)) {
7665 + const struct vchiq_openack_payload *payload =
7666 + (struct vchiq_openack_payload *)
7668 + service->peer_version = payload->version;
7670 + vchiq_log_info(vchiq_sync_log_level,
7671 + "%d: sf OPENACK@%x,%x (%d->%d) v:%d",
7672 + state->id, (unsigned int)header, size,
7673 + remoteport, localport, service->peer_version);
7674 + if (service->srvstate == VCHIQ_SRVSTATE_OPENING) {
7675 + service->remoteport = remoteport;
7676 + vchiq_set_service_state(service,
7677 + VCHIQ_SRVSTATE_OPENSYNC);
7678 + up(&service->remove_event);
7680 + release_message_sync(state, header);
7683 + case VCHIQ_MSG_DATA:
7684 + vchiq_log_trace(vchiq_sync_log_level,
7685 + "%d: sf DATA@%x,%x (%d->%d)",
7686 + state->id, (unsigned int)header, size,
7687 + remoteport, localport);
7689 + if ((service->remoteport == remoteport) &&
7690 + (service->srvstate ==
7691 + VCHIQ_SRVSTATE_OPENSYNC)) {
7692 + if (make_service_callback(service,
7693 + VCHIQ_MESSAGE_AVAILABLE, header,
7694 + NULL) == VCHIQ_RETRY)
7695 + vchiq_log_error(vchiq_sync_log_level,
7696 + "synchronous callback to "
7697 + "service %d returns "
7704 + vchiq_log_error(vchiq_sync_log_level,
7705 + "%d: sf unexpected msgid %x@%x,%x",
7706 + state->id, msgid, (unsigned int)header, size);
7707 + release_message_sync(state, header);
7711 + unlock_service(service);
7719 +init_bulk_queue(VCHIQ_BULK_QUEUE_T *queue)
7721 + queue->local_insert = 0;
7722 + queue->remote_insert = 0;
7723 + queue->process = 0;
7724 + queue->remote_notify = 0;
7725 + queue->remove = 0;
7729 +inline const char *
7730 +get_conn_state_name(VCHIQ_CONNSTATE_T conn_state)
7732 + return conn_state_names[conn_state];
7736 +VCHIQ_SLOT_ZERO_T *
7737 +vchiq_init_slots(void *mem_base, int mem_size)
7739 + int mem_align = (VCHIQ_SLOT_SIZE - (int)mem_base) & VCHIQ_SLOT_MASK;
7740 + VCHIQ_SLOT_ZERO_T *slot_zero =
7741 + (VCHIQ_SLOT_ZERO_T *)((char *)mem_base + mem_align);
7742 + int num_slots = (mem_size - mem_align)/VCHIQ_SLOT_SIZE;
7743 + int first_data_slot = VCHIQ_SLOT_ZERO_SLOTS;
7745 + /* Ensure there is enough memory to run an absolutely minimum system */
7746 + num_slots -= first_data_slot;
7748 + if (num_slots < 4) {
7749 + vchiq_log_error(vchiq_core_log_level,
7750 + "vchiq_init_slots - insufficient memory %x bytes",
7755 + memset(slot_zero, 0, sizeof(VCHIQ_SLOT_ZERO_T));
7757 + slot_zero->magic = VCHIQ_MAGIC;
7758 + slot_zero->version = VCHIQ_VERSION;
7759 + slot_zero->version_min = VCHIQ_VERSION_MIN;
7760 + slot_zero->slot_zero_size = sizeof(VCHIQ_SLOT_ZERO_T);
7761 + slot_zero->slot_size = VCHIQ_SLOT_SIZE;
7762 + slot_zero->max_slots = VCHIQ_MAX_SLOTS;
7763 + slot_zero->max_slots_per_side = VCHIQ_MAX_SLOTS_PER_SIDE;
7765 + slot_zero->master.slot_sync = first_data_slot;
7766 + slot_zero->master.slot_first = first_data_slot + 1;
7767 + slot_zero->master.slot_last = first_data_slot + (num_slots/2) - 1;
7768 + slot_zero->slave.slot_sync = first_data_slot + (num_slots/2);
7769 + slot_zero->slave.slot_first = first_data_slot + (num_slots/2) + 1;
7770 + slot_zero->slave.slot_last = first_data_slot + num_slots - 1;
7776 +vchiq_init_state(VCHIQ_STATE_T *state, VCHIQ_SLOT_ZERO_T *slot_zero,
7779 + VCHIQ_SHARED_STATE_T *local;
7780 + VCHIQ_SHARED_STATE_T *remote;
7781 + VCHIQ_STATUS_T status;
7782 + char threadname[10];
7786 + vchiq_log_warning(vchiq_core_log_level,
7787 + "%s: slot_zero = 0x%08lx, is_master = %d",
7788 + __func__, (unsigned long)slot_zero, is_master);
7790 + /* Check the input configuration */
7792 + if (slot_zero->magic != VCHIQ_MAGIC) {
7793 + vchiq_loud_error_header();
7794 + vchiq_loud_error("Invalid VCHIQ magic value found.");
7795 + vchiq_loud_error("slot_zero=%x: magic=%x (expected %x)",
7796 + (unsigned int)slot_zero, slot_zero->magic, VCHIQ_MAGIC);
7797 + vchiq_loud_error_footer();
7798 + return VCHIQ_ERROR;
7801 + if (slot_zero->version < VCHIQ_VERSION_MIN) {
7802 + vchiq_loud_error_header();
7803 + vchiq_loud_error("Incompatible VCHIQ versions found.");
7804 + vchiq_loud_error("slot_zero=%x: VideoCore version=%d "
7806 + (unsigned int)slot_zero, slot_zero->version,
7807 + VCHIQ_VERSION_MIN);
7808 + vchiq_loud_error("Restart with a newer VideoCore image.");
7809 + vchiq_loud_error_footer();
7810 + return VCHIQ_ERROR;
7813 + if (VCHIQ_VERSION < slot_zero->version_min) {
7814 + vchiq_loud_error_header();
7815 + vchiq_loud_error("Incompatible VCHIQ versions found.");
7816 + vchiq_loud_error("slot_zero=%x: version=%d (VideoCore "
7818 + (unsigned int)slot_zero, VCHIQ_VERSION,
7819 + slot_zero->version_min);
7820 + vchiq_loud_error("Restart with a newer kernel.");
7821 + vchiq_loud_error_footer();
7822 + return VCHIQ_ERROR;
7825 + if ((slot_zero->slot_zero_size != sizeof(VCHIQ_SLOT_ZERO_T)) ||
7826 + (slot_zero->slot_size != VCHIQ_SLOT_SIZE) ||
7827 + (slot_zero->max_slots != VCHIQ_MAX_SLOTS) ||
7828 + (slot_zero->max_slots_per_side != VCHIQ_MAX_SLOTS_PER_SIDE)) {
7829 + vchiq_loud_error_header();
7830 + if (slot_zero->slot_zero_size != sizeof(VCHIQ_SLOT_ZERO_T))
7831 + vchiq_loud_error("slot_zero=%x: slot_zero_size=%x "
7833 + (unsigned int)slot_zero,
7834 + slot_zero->slot_zero_size,
7835 + sizeof(VCHIQ_SLOT_ZERO_T));
7836 + if (slot_zero->slot_size != VCHIQ_SLOT_SIZE)
7837 + vchiq_loud_error("slot_zero=%x: slot_size=%d "
7839 + (unsigned int)slot_zero, slot_zero->slot_size,
7841 + if (slot_zero->max_slots != VCHIQ_MAX_SLOTS)
7842 + vchiq_loud_error("slot_zero=%x: max_slots=%d "
7844 + (unsigned int)slot_zero, slot_zero->max_slots,
7846 + if (slot_zero->max_slots_per_side != VCHIQ_MAX_SLOTS_PER_SIDE)
7847 + vchiq_loud_error("slot_zero=%x: max_slots_per_side=%d "
7849 + (unsigned int)slot_zero,
7850 + slot_zero->max_slots_per_side,
7851 + VCHIQ_MAX_SLOTS_PER_SIDE);
7852 + vchiq_loud_error_footer();
7853 + return VCHIQ_ERROR;
7857 + local = &slot_zero->master;
7858 + remote = &slot_zero->slave;
7860 + local = &slot_zero->slave;
7861 + remote = &slot_zero->master;
7864 + if (local->initialised) {
7865 + vchiq_loud_error_header();
7866 + if (remote->initialised)
7867 + vchiq_loud_error("local state has already been "
7870 + vchiq_loud_error("master/slave mismatch - two %ss",
7871 + is_master ? "master" : "slave");
7872 + vchiq_loud_error_footer();
7873 + return VCHIQ_ERROR;
7876 + memset(state, 0, sizeof(VCHIQ_STATE_T));
7879 + state->is_master = is_master;
7882 + initialize shared state pointers
7885 + state->local = local;
7886 + state->remote = remote;
7887 + state->slot_data = (VCHIQ_SLOT_T *)slot_zero;
7890 + initialize events and mutexes
7893 + sema_init(&state->connect, 0);
7894 + mutex_init(&state->mutex);
7895 + sema_init(&state->trigger_event, 0);
7896 + sema_init(&state->recycle_event, 0);
7897 + sema_init(&state->sync_trigger_event, 0);
7898 + sema_init(&state->sync_release_event, 0);
7900 + mutex_init(&state->slot_mutex);
7901 + mutex_init(&state->recycle_mutex);
7902 + mutex_init(&state->sync_mutex);
7903 + mutex_init(&state->bulk_transfer_mutex);
7905 + sema_init(&state->slot_available_event, 0);
7906 + sema_init(&state->slot_remove_event, 0);
7907 + sema_init(&state->data_quota_event, 0);
7909 + state->slot_queue_available = 0;
7911 + for (i = 0; i < VCHIQ_MAX_SERVICES; i++) {
7912 + VCHIQ_SERVICE_QUOTA_T *service_quota =
7913 + &state->service_quotas[i];
7914 + sema_init(&service_quota->quota_event, 0);
7917 + for (i = local->slot_first; i <= local->slot_last; i++) {
7918 + local->slot_queue[state->slot_queue_available++] = i;
7919 + up(&state->slot_available_event);
7922 + state->default_slot_quota = state->slot_queue_available/2;
7923 + state->default_message_quota =
7924 + min((unsigned short)(state->default_slot_quota * 256),
7925 + (unsigned short)~0);
7927 + state->previous_data_index = -1;
7928 + state->data_use_count = 0;
7929 + state->data_quota = state->slot_queue_available - 1;
7931 + local->trigger.event = &state->trigger_event;
7932 + remote_event_create(&local->trigger);
7933 + local->tx_pos = 0;
7935 + local->recycle.event = &state->recycle_event;
7936 + remote_event_create(&local->recycle);
7937 + local->slot_queue_recycle = state->slot_queue_available;
7939 + local->sync_trigger.event = &state->sync_trigger_event;
7940 + remote_event_create(&local->sync_trigger);
7942 + local->sync_release.event = &state->sync_release_event;
7943 + remote_event_create(&local->sync_release);
7945 + /* At start-of-day, the slot is empty and available */
7946 + ((VCHIQ_HEADER_T *)SLOT_DATA_FROM_INDEX(state, local->slot_sync))->msgid
7947 + = VCHIQ_MSGID_PADDING;
7948 + remote_event_signal_local(&local->sync_release);
7950 + local->debug[DEBUG_ENTRIES] = DEBUG_MAX;
7952 + status = vchiq_platform_init_state(state);
7955 + bring up slot handler thread
7957 + snprintf(threadname, sizeof(threadname), "VCHIQ-%d", state->id);
7958 + state->slot_handler_thread = kthread_create(&slot_handler_func,
7962 + if (state->slot_handler_thread == NULL) {
7963 + vchiq_loud_error_header();
7964 + vchiq_loud_error("couldn't create thread %s", threadname);
7965 + vchiq_loud_error_footer();
7966 + return VCHIQ_ERROR;
7968 + set_user_nice(state->slot_handler_thread, -19);
7969 + wake_up_process(state->slot_handler_thread);
7971 + snprintf(threadname, sizeof(threadname), "VCHIQr-%d", state->id);
7972 + state->recycle_thread = kthread_create(&recycle_func,
7975 + if (state->recycle_thread == NULL) {
7976 + vchiq_loud_error_header();
7977 + vchiq_loud_error("couldn't create thread %s", threadname);
7978 + vchiq_loud_error_footer();
7979 + return VCHIQ_ERROR;
7981 + set_user_nice(state->recycle_thread, -19);
7982 + wake_up_process(state->recycle_thread);
7984 + snprintf(threadname, sizeof(threadname), "VCHIQs-%d", state->id);
7985 + state->sync_thread = kthread_create(&sync_func,
7988 + if (state->sync_thread == NULL) {
7989 + vchiq_loud_error_header();
7990 + vchiq_loud_error("couldn't create thread %s", threadname);
7991 + vchiq_loud_error_footer();
7992 + return VCHIQ_ERROR;
7994 + set_user_nice(state->sync_thread, -20);
7995 + wake_up_process(state->sync_thread);
7997 + BUG_ON(state->id >= VCHIQ_MAX_STATES);
7998 + vchiq_states[state->id] = state;
8000 + /* Indicate readiness to the other side */
8001 + local->initialised = 1;
8006 +/* Called from application thread when a client or server service is created. */
8008 +vchiq_add_service_internal(VCHIQ_STATE_T *state,
8009 + const VCHIQ_SERVICE_PARAMS_T *params, int srvstate,
8010 + VCHIQ_INSTANCE_T instance, VCHIQ_USERDATA_TERM_T userdata_term)
8012 + VCHIQ_SERVICE_T *service;
8014 + service = kmalloc(sizeof(VCHIQ_SERVICE_T), GFP_KERNEL);
8016 + service->base.fourcc = params->fourcc;
8017 + service->base.callback = params->callback;
8018 + service->base.userdata = params->userdata;
8019 + service->handle = VCHIQ_SERVICE_HANDLE_INVALID;
8020 + service->ref_count = 1;
8021 + service->srvstate = VCHIQ_SRVSTATE_FREE;
8022 + service->userdata_term = userdata_term;
8023 + service->localport = VCHIQ_PORT_FREE;
8024 + service->remoteport = VCHIQ_PORT_FREE;
8026 + service->public_fourcc = (srvstate == VCHIQ_SRVSTATE_OPENING) ?
8027 + VCHIQ_FOURCC_INVALID : params->fourcc;
8028 + service->client_id = 0;
8029 + service->auto_close = 1;
8030 + service->sync = 0;
8031 + service->closing = 0;
8032 + atomic_set(&service->poll_flags, 0);
8033 + service->version = params->version;
8034 + service->version_min = params->version_min;
8035 + service->state = state;
8036 + service->instance = instance;
8037 + service->service_use_count = 0;
8038 + init_bulk_queue(&service->bulk_tx);
8039 + init_bulk_queue(&service->bulk_rx);
8040 + sema_init(&service->remove_event, 0);
8041 + sema_init(&service->bulk_remove_event, 0);
8042 + mutex_init(&service->bulk_mutex);
8043 + memset(&service->stats, 0, sizeof(service->stats));
8045 + vchiq_log_error(vchiq_core_log_level,
8050 + VCHIQ_SERVICE_T **pservice = NULL;
8053 + /* Although it is perfectly possible to use service_spinlock
8054 + ** to protect the creation of services, it is overkill as it
8055 + ** disables interrupts while the array is searched.
8056 + ** The only danger is of another thread trying to create a
8057 + ** service - service deletion is safe.
8058 + ** Therefore it is preferable to use state->mutex which,
8059 + ** although slower to claim, doesn't block interrupts while
8063 + mutex_lock(&state->mutex);
8065 + /* Prepare to use a previously unused service */
8066 + if (state->unused_service < VCHIQ_MAX_SERVICES)
8067 + pservice = &state->services[state->unused_service];
8069 + if (srvstate == VCHIQ_SRVSTATE_OPENING) {
8070 + for (i = 0; i < state->unused_service; i++) {
8071 + VCHIQ_SERVICE_T *srv = state->services[i];
8073 + pservice = &state->services[i];
8078 + for (i = (state->unused_service - 1); i >= 0; i--) {
8079 + VCHIQ_SERVICE_T *srv = state->services[i];
8081 + pservice = &state->services[i];
8082 + else if ((srv->public_fourcc == params->fourcc)
8083 + && ((srv->instance != instance) ||
8084 + (srv->base.callback !=
8085 + params->callback))) {
8086 + /* There is another server using this
8087 + ** fourcc which doesn't match. */
8095 + service->localport = (pservice - state->services);
8097 + handle_seq = VCHIQ_MAX_STATES *
8098 + VCHIQ_MAX_SERVICES;
8099 + service->handle = handle_seq |
8100 + (state->id * VCHIQ_MAX_SERVICES) |
8101 + service->localport;
8102 + handle_seq += VCHIQ_MAX_STATES * VCHIQ_MAX_SERVICES;
8103 + *pservice = service;
8104 + if (pservice == &state->services[state->unused_service])
8105 + state->unused_service++;
8108 + mutex_unlock(&state->mutex);
8117 + VCHIQ_SERVICE_QUOTA_T *service_quota =
8118 + &state->service_quotas[service->localport];
8119 + service_quota->slot_quota = state->default_slot_quota;
8120 + service_quota->message_quota = state->default_message_quota;
8121 + if (service_quota->slot_use_count == 0)
8122 + service_quota->previous_tx_index =
8123 + SLOT_QUEUE_INDEX_FROM_POS(state->local_tx_pos)
8126 + /* Bring this service online */
8127 + vchiq_set_service_state(service, srvstate);
8129 + vchiq_log_info(vchiq_core_msg_log_level,
8130 + "%s Service %c%c%c%c SrcPort:%d",
8131 + (srvstate == VCHIQ_SRVSTATE_OPENING)
8133 + VCHIQ_FOURCC_AS_4CHARS(params->fourcc),
8134 + service->localport);
8137 + /* Don't unlock the service - leave it with a ref_count of 1. */
8143 +vchiq_open_service_internal(VCHIQ_SERVICE_T *service, int client_id)
8145 + struct vchiq_open_payload payload = {
8146 + service->base.fourcc,
8149 + service->version_min
8151 + VCHIQ_ELEMENT_T body = { &payload, sizeof(payload) };
8152 + VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
8154 + service->client_id = client_id;
8155 + vchiq_use_service_internal(service);
8156 + status = queue_message(service->state, NULL,
8157 + VCHIQ_MAKE_MSG(VCHIQ_MSG_OPEN, service->localport, 0),
8158 + &body, 1, sizeof(payload), 1);
8159 + if (status == VCHIQ_SUCCESS) {
8160 + if (down_interruptible(&service->remove_event) != 0) {
8161 + status = VCHIQ_RETRY;
8162 + vchiq_release_service_internal(service);
8163 + } else if ((service->srvstate != VCHIQ_SRVSTATE_OPEN) &&
8164 + (service->srvstate != VCHIQ_SRVSTATE_OPENSYNC)) {
8165 + if (service->srvstate != VCHIQ_SRVSTATE_CLOSEWAIT)
8166 + vchiq_log_error(vchiq_core_log_level,
8167 + "%d: osi - srvstate = %s (ref %d)",
8168 + service->state->id,
8169 + srvstate_names[service->srvstate],
8170 + service->ref_count);
8171 + status = VCHIQ_ERROR;
8172 + VCHIQ_SERVICE_STATS_INC(service, error_count);
8173 + vchiq_release_service_internal(service);
8180 +release_service_messages(VCHIQ_SERVICE_T *service)
8182 + VCHIQ_STATE_T *state = service->state;
8183 + int slot_last = state->remote->slot_last;
8186 + /* Release any claimed messages */
8187 + for (i = state->remote->slot_first; i <= slot_last; i++) {
8188 + VCHIQ_SLOT_INFO_T *slot_info =
8189 + SLOT_INFO_FROM_INDEX(state, i);
8190 + if (slot_info->release_count != slot_info->use_count) {
8192 + (char *)SLOT_DATA_FROM_INDEX(state, i);
8193 + unsigned int pos, end;
8195 + end = VCHIQ_SLOT_SIZE;
8196 + if (data == state->rx_data)
8197 + /* This buffer is still being read from - stop
8198 + ** at the current read position */
8199 + end = state->rx_pos & VCHIQ_SLOT_MASK;
8203 + while (pos < end) {
8204 + VCHIQ_HEADER_T *header =
8205 + (VCHIQ_HEADER_T *)(data + pos);
8206 + int msgid = header->msgid;
8207 + int port = VCHIQ_MSG_DSTPORT(msgid);
8208 + if ((port == service->localport) &&
8209 + (msgid & VCHIQ_MSGID_CLAIMED)) {
8210 + vchiq_log_info(vchiq_core_log_level,
8212 + (unsigned int)header);
8213 + release_slot(state, slot_info, header,
8216 + pos += calc_stride(header->size);
8217 + if (pos > VCHIQ_SLOT_SIZE) {
8218 + vchiq_log_error(vchiq_core_log_level,
8219 + "fsi - pos %x: header %x, "
8220 + "msgid %x, header->msgid %x, "
8221 + "header->size %x",
8222 + pos, (unsigned int)header,
8223 + msgid, header->msgid,
8225 + WARN(1, "invalid slot position\n");
8233 +do_abort_bulks(VCHIQ_SERVICE_T *service)
8235 + VCHIQ_STATUS_T status;
8237 + /* Abort any outstanding bulk transfers */
8238 + if (mutex_lock_interruptible(&service->bulk_mutex) != 0)
8240 + abort_outstanding_bulks(service, &service->bulk_tx);
8241 + abort_outstanding_bulks(service, &service->bulk_rx);
8242 + mutex_unlock(&service->bulk_mutex);
8244 + status = notify_bulks(service, &service->bulk_tx, 0/*!retry_poll*/);
8245 + if (status == VCHIQ_SUCCESS)
8246 + status = notify_bulks(service, &service->bulk_rx,
8247 + 0/*!retry_poll*/);
8248 + return (status == VCHIQ_SUCCESS);
8251 +static VCHIQ_STATUS_T
8252 +close_service_complete(VCHIQ_SERVICE_T *service, int failstate)
8254 + VCHIQ_STATUS_T status;
8255 + int is_server = (service->public_fourcc != VCHIQ_FOURCC_INVALID);
8258 + switch (service->srvstate) {
8259 + case VCHIQ_SRVSTATE_OPEN:
8260 + case VCHIQ_SRVSTATE_CLOSESENT:
8261 + case VCHIQ_SRVSTATE_CLOSERECVD:
8263 + if (service->auto_close) {
8264 + service->client_id = 0;
8265 + service->remoteport = VCHIQ_PORT_FREE;
8266 + newstate = VCHIQ_SRVSTATE_LISTENING;
8268 + newstate = VCHIQ_SRVSTATE_CLOSEWAIT;
8270 + newstate = VCHIQ_SRVSTATE_CLOSED;
8271 + vchiq_set_service_state(service, newstate);
8273 + case VCHIQ_SRVSTATE_LISTENING:
8276 + vchiq_log_error(vchiq_core_log_level,
8277 + "close_service_complete(%x) called in state %s",
8278 + service->handle, srvstate_names[service->srvstate]);
8279 + WARN(1, "close_service_complete in unexpected state\n");
8280 + return VCHIQ_ERROR;
8283 + status = make_service_callback(service,
8284 + VCHIQ_SERVICE_CLOSED, NULL, NULL);
8286 + if (status != VCHIQ_RETRY) {
8287 + int uc = service->service_use_count;
8289 + /* Complete the close process */
8290 + for (i = 0; i < uc; i++)
8291 + /* cater for cases where close is forced and the
8292 + ** client may not close all it's handles */
8293 + vchiq_release_service_internal(service);
8295 + service->client_id = 0;
8296 + service->remoteport = VCHIQ_PORT_FREE;
8298 + if (service->srvstate == VCHIQ_SRVSTATE_CLOSED)
8299 + vchiq_free_service_internal(service);
8300 + else if (service->srvstate != VCHIQ_SRVSTATE_CLOSEWAIT) {
8302 + service->closing = 0;
8304 + up(&service->remove_event);
8307 + vchiq_set_service_state(service, failstate);
8312 +/* Called by the slot handler */
8314 +vchiq_close_service_internal(VCHIQ_SERVICE_T *service, int close_recvd)
8316 + VCHIQ_STATE_T *state = service->state;
8317 + VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
8318 + int is_server = (service->public_fourcc != VCHIQ_FOURCC_INVALID);
8320 + vchiq_log_info(vchiq_core_log_level, "%d: csi:%d,%d (%s)",
8321 + service->state->id, service->localport, close_recvd,
8322 + srvstate_names[service->srvstate]);
8324 + switch (service->srvstate) {
8325 + case VCHIQ_SRVSTATE_CLOSED:
8326 + case VCHIQ_SRVSTATE_HIDDEN:
8327 + case VCHIQ_SRVSTATE_LISTENING:
8328 + case VCHIQ_SRVSTATE_CLOSEWAIT:
8330 + vchiq_log_error(vchiq_core_log_level,
8331 + "vchiq_close_service_internal(1) called "
8333 + srvstate_names[service->srvstate]);
8334 + else if (is_server) {
8335 + if (service->srvstate == VCHIQ_SRVSTATE_LISTENING) {
8336 + status = VCHIQ_ERROR;
8338 + service->client_id = 0;
8339 + service->remoteport = VCHIQ_PORT_FREE;
8340 + if (service->srvstate ==
8341 + VCHIQ_SRVSTATE_CLOSEWAIT)
8342 + vchiq_set_service_state(service,
8343 + VCHIQ_SRVSTATE_LISTENING);
8345 + up(&service->remove_event);
8347 + vchiq_free_service_internal(service);
8349 + case VCHIQ_SRVSTATE_OPENING:
8350 + if (close_recvd) {
8351 + /* The open was rejected - tell the user */
8352 + vchiq_set_service_state(service,
8353 + VCHIQ_SRVSTATE_CLOSEWAIT);
8354 + up(&service->remove_event);
8356 + /* Shutdown mid-open - let the other side know */
8357 + status = queue_message(state, service,
8360 + service->localport,
8361 + VCHIQ_MSG_DSTPORT(service->remoteport)),
8366 + case VCHIQ_SRVSTATE_OPENSYNC:
8367 + mutex_lock(&state->sync_mutex);
8368 + /* Drop through */
8370 + case VCHIQ_SRVSTATE_OPEN:
8371 + if (state->is_master || close_recvd) {
8372 + if (!do_abort_bulks(service))
8373 + status = VCHIQ_RETRY;
8376 + release_service_messages(service);
8378 + if (status == VCHIQ_SUCCESS)
8379 + status = queue_message(state, service,
8382 + service->localport,
8383 + VCHIQ_MSG_DSTPORT(service->remoteport)),
8386 + if (status == VCHIQ_SUCCESS) {
8389 + } else if (service->srvstate == VCHIQ_SRVSTATE_OPENSYNC) {
8390 + mutex_unlock(&state->sync_mutex);
8395 + status = close_service_complete(service,
8396 + VCHIQ_SRVSTATE_CLOSERECVD);
8399 + case VCHIQ_SRVSTATE_CLOSESENT:
8401 + /* This happens when a process is killed mid-close */
8404 + if (!state->is_master) {
8405 + if (!do_abort_bulks(service)) {
8406 + status = VCHIQ_RETRY;
8411 + if (status == VCHIQ_SUCCESS)
8412 + status = close_service_complete(service,
8413 + VCHIQ_SRVSTATE_CLOSERECVD);
8416 + case VCHIQ_SRVSTATE_CLOSERECVD:
8417 + if (!close_recvd && is_server)
8418 + /* Force into LISTENING mode */
8419 + vchiq_set_service_state(service,
8420 + VCHIQ_SRVSTATE_LISTENING);
8421 + status = close_service_complete(service,
8422 + VCHIQ_SRVSTATE_CLOSERECVD);
8426 + vchiq_log_error(vchiq_core_log_level,
8427 + "vchiq_close_service_internal(%d) called in state %s",
8428 + close_recvd, srvstate_names[service->srvstate]);
8435 +/* Called from the application process upon process death */
8437 +vchiq_terminate_service_internal(VCHIQ_SERVICE_T *service)
8439 + VCHIQ_STATE_T *state = service->state;
8441 + vchiq_log_info(vchiq_core_log_level, "%d: tsi - (%d<->%d)",
8442 + state->id, service->localport, service->remoteport);
8444 + mark_service_closing(service);
8446 + /* Mark the service for removal by the slot handler */
8447 + request_poll(state, service, VCHIQ_POLL_REMOVE);
8450 +/* Called from the slot handler */
8452 +vchiq_free_service_internal(VCHIQ_SERVICE_T *service)
8454 + VCHIQ_STATE_T *state = service->state;
8456 + vchiq_log_info(vchiq_core_log_level, "%d: fsi - (%d)",
8457 + state->id, service->localport);
8459 + switch (service->srvstate) {
8460 + case VCHIQ_SRVSTATE_OPENING:
8461 + case VCHIQ_SRVSTATE_CLOSED:
8462 + case VCHIQ_SRVSTATE_HIDDEN:
8463 + case VCHIQ_SRVSTATE_LISTENING:
8464 + case VCHIQ_SRVSTATE_CLOSEWAIT:
8467 + vchiq_log_error(vchiq_core_log_level,
8468 + "%d: fsi - (%d) in state %s",
8469 + state->id, service->localport,
8470 + srvstate_names[service->srvstate]);
8474 + vchiq_set_service_state(service, VCHIQ_SRVSTATE_FREE);
8476 + up(&service->remove_event);
8478 + /* Release the initial lock */
8479 + unlock_service(service);
8483 +vchiq_connect_internal(VCHIQ_STATE_T *state, VCHIQ_INSTANCE_T instance)
8485 + VCHIQ_SERVICE_T *service;
8488 + /* Find all services registered to this client and enable them. */
8490 + while ((service = next_service_by_instance(state, instance,
8492 + if (service->srvstate == VCHIQ_SRVSTATE_HIDDEN)
8493 + vchiq_set_service_state(service,
8494 + VCHIQ_SRVSTATE_LISTENING);
8495 + unlock_service(service);
8498 + if (state->conn_state == VCHIQ_CONNSTATE_DISCONNECTED) {
8499 + if (queue_message(state, NULL,
8500 + VCHIQ_MAKE_MSG(VCHIQ_MSG_CONNECT, 0, 0), NULL, 0,
8501 + 0, 1) == VCHIQ_RETRY)
8502 + return VCHIQ_RETRY;
8504 + vchiq_set_conn_state(state, VCHIQ_CONNSTATE_CONNECTING);
8507 + if (state->conn_state == VCHIQ_CONNSTATE_CONNECTING) {
8508 + if (down_interruptible(&state->connect) != 0)
8509 + return VCHIQ_RETRY;
8511 + vchiq_set_conn_state(state, VCHIQ_CONNSTATE_CONNECTED);
8512 + up(&state->connect);
8515 + return VCHIQ_SUCCESS;
8519 +vchiq_shutdown_internal(VCHIQ_STATE_T *state, VCHIQ_INSTANCE_T instance)
8521 + VCHIQ_SERVICE_T *service;
8524 +	/* Find all services registered to this client and remove them. */
8526 + while ((service = next_service_by_instance(state, instance,
8528 + (void)vchiq_remove_service(service->handle);
8529 + unlock_service(service);
8532 + return VCHIQ_SUCCESS;
8536 +vchiq_pause_internal(VCHIQ_STATE_T *state)
8538 + VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
8540 + switch (state->conn_state) {
8541 + case VCHIQ_CONNSTATE_CONNECTED:
8542 + /* Request a pause */
8543 + vchiq_set_conn_state(state, VCHIQ_CONNSTATE_PAUSING);
8544 + request_poll(state, NULL, 0);
8547 + vchiq_log_error(vchiq_core_log_level,
8548 + "vchiq_pause_internal in state %s\n",
8549 + conn_state_names[state->conn_state]);
8550 + status = VCHIQ_ERROR;
8551 + VCHIQ_STATS_INC(state, error_count);
8559 +vchiq_resume_internal(VCHIQ_STATE_T *state)
8561 + VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
8563 + if (state->conn_state == VCHIQ_CONNSTATE_PAUSED) {
8564 + vchiq_set_conn_state(state, VCHIQ_CONNSTATE_RESUMING);
8565 + request_poll(state, NULL, 0);
8567 + status = VCHIQ_ERROR;
8568 + VCHIQ_STATS_INC(state, error_count);
8575 +vchiq_close_service(VCHIQ_SERVICE_HANDLE_T handle)
8577 + /* Unregister the service */
8578 + VCHIQ_SERVICE_T *service = find_service_by_handle(handle);
8579 + VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
8582 + return VCHIQ_ERROR;
8584 + vchiq_log_info(vchiq_core_log_level,
8585 + "%d: close_service:%d",
8586 + service->state->id, service->localport);
8588 + if ((service->srvstate == VCHIQ_SRVSTATE_FREE) ||
8589 + (service->srvstate == VCHIQ_SRVSTATE_LISTENING) ||
8590 + (service->srvstate == VCHIQ_SRVSTATE_HIDDEN)) {
8591 + unlock_service(service);
8592 + return VCHIQ_ERROR;
8595 + mark_service_closing(service);
8597 + if (current == service->state->slot_handler_thread) {
8598 + status = vchiq_close_service_internal(service,
8599 + 0/*!close_recvd*/);
8600 + BUG_ON(status == VCHIQ_RETRY);
8602 + /* Mark the service for termination by the slot handler */
8603 + request_poll(service->state, service, VCHIQ_POLL_TERMINATE);
8607 + if (down_interruptible(&service->remove_event) != 0) {
8608 + status = VCHIQ_RETRY;
8612 + if ((service->srvstate == VCHIQ_SRVSTATE_FREE) ||
8613 + (service->srvstate == VCHIQ_SRVSTATE_LISTENING) ||
8614 + (service->srvstate == VCHIQ_SRVSTATE_OPEN))
8617 + vchiq_log_warning(vchiq_core_log_level,
8618 + "%d: close_service:%d - waiting in state %s",
8619 + service->state->id, service->localport,
8620 + srvstate_names[service->srvstate]);
8623 + if ((status == VCHIQ_SUCCESS) &&
8624 + (service->srvstate != VCHIQ_SRVSTATE_FREE) &&
8625 + (service->srvstate != VCHIQ_SRVSTATE_LISTENING))
8626 + status = VCHIQ_ERROR;
8628 + unlock_service(service);
8634 +vchiq_remove_service(VCHIQ_SERVICE_HANDLE_T handle)
8636 + /* Unregister the service */
8637 + VCHIQ_SERVICE_T *service = find_service_by_handle(handle);
8638 + VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
8641 + return VCHIQ_ERROR;
8643 + vchiq_log_info(vchiq_core_log_level,
8644 + "%d: remove_service:%d",
8645 + service->state->id, service->localport);
8647 + if (service->srvstate == VCHIQ_SRVSTATE_FREE) {
8648 + unlock_service(service);
8649 + return VCHIQ_ERROR;
8652 + mark_service_closing(service);
8654 + if ((service->srvstate == VCHIQ_SRVSTATE_HIDDEN) ||
8655 + (current == service->state->slot_handler_thread)) {
8656 + /* Make it look like a client, because it must be removed and
8657 + not left in the LISTENING state. */
8658 + service->public_fourcc = VCHIQ_FOURCC_INVALID;
8660 + status = vchiq_close_service_internal(service,
8661 + 0/*!close_recvd*/);
8662 + BUG_ON(status == VCHIQ_RETRY);
8664 + /* Mark the service for removal by the slot handler */
8665 + request_poll(service->state, service, VCHIQ_POLL_REMOVE);
8668 + if (down_interruptible(&service->remove_event) != 0) {
8669 + status = VCHIQ_RETRY;
8673 + if ((service->srvstate == VCHIQ_SRVSTATE_FREE) ||
8674 + (service->srvstate == VCHIQ_SRVSTATE_OPEN))
8677 + vchiq_log_warning(vchiq_core_log_level,
8678 + "%d: remove_service:%d - waiting in state %s",
8679 + service->state->id, service->localport,
8680 + srvstate_names[service->srvstate]);
8683 + if ((status == VCHIQ_SUCCESS) &&
8684 + (service->srvstate != VCHIQ_SRVSTATE_FREE))
8685 + status = VCHIQ_ERROR;
8687 + unlock_service(service);
8693 +/* This function may be called by kernel threads or user threads.
8694 + * User threads may receive VCHIQ_RETRY to indicate that a signal has been
8695 + * received and the call should be retried after being returned to user
8697 + * When called in blocking mode, the userdata field points to a bulk_waiter
8701 +vchiq_bulk_transfer(VCHIQ_SERVICE_HANDLE_T handle,
8702 + VCHI_MEM_HANDLE_T memhandle, void *offset, int size, void *userdata,
8703 + VCHIQ_BULK_MODE_T mode, VCHIQ_BULK_DIR_T dir)
8705 + VCHIQ_SERVICE_T *service = find_service_by_handle(handle);
8706 + VCHIQ_BULK_QUEUE_T *queue;
8707 + VCHIQ_BULK_T *bulk;
8708 + VCHIQ_STATE_T *state;
8709 + struct bulk_waiter *bulk_waiter = NULL;
8710 + const char dir_char = (dir == VCHIQ_BULK_TRANSMIT) ? 't' : 'r';
8711 + const int dir_msgtype = (dir == VCHIQ_BULK_TRANSMIT) ?
8712 + VCHIQ_MSG_BULK_TX : VCHIQ_MSG_BULK_RX;
8713 + VCHIQ_STATUS_T status = VCHIQ_ERROR;
8716 + (service->srvstate != VCHIQ_SRVSTATE_OPEN) ||
8717 + ((memhandle == VCHI_MEM_HANDLE_INVALID) && (offset == NULL)) ||
8718 + (vchiq_check_service(service) != VCHIQ_SUCCESS))
8722 + case VCHIQ_BULK_MODE_NOCALLBACK:
8723 + case VCHIQ_BULK_MODE_CALLBACK:
8725 + case VCHIQ_BULK_MODE_BLOCKING:
8726 + bulk_waiter = (struct bulk_waiter *)userdata;
8727 + sema_init(&bulk_waiter->event, 0);
8728 + bulk_waiter->actual = 0;
8729 + bulk_waiter->bulk = NULL;
8731 + case VCHIQ_BULK_MODE_WAITING:
8732 + bulk_waiter = (struct bulk_waiter *)userdata;
8733 + bulk = bulk_waiter->bulk;
8739 + state = service->state;
8741 + queue = (dir == VCHIQ_BULK_TRANSMIT) ?
8742 + &service->bulk_tx : &service->bulk_rx;
8744 + if (mutex_lock_interruptible(&service->bulk_mutex) != 0) {
8745 + status = VCHIQ_RETRY;
8749 + if (queue->local_insert == queue->remove + VCHIQ_NUM_SERVICE_BULKS) {
8750 + VCHIQ_SERVICE_STATS_INC(service, bulk_stalls);
8752 + mutex_unlock(&service->bulk_mutex);
8753 + if (down_interruptible(&service->bulk_remove_event)
8755 + status = VCHIQ_RETRY;
8758 + if (mutex_lock_interruptible(&service->bulk_mutex)
8760 + status = VCHIQ_RETRY;
8763 + } while (queue->local_insert == queue->remove +
8764 + VCHIQ_NUM_SERVICE_BULKS);
8767 + bulk = &queue->bulks[BULK_INDEX(queue->local_insert)];
8769 + bulk->mode = mode;
8771 + bulk->userdata = userdata;
8772 + bulk->size = size;
8773 + bulk->actual = VCHIQ_BULK_ACTUAL_ABORTED;
8775 + if (vchiq_prepare_bulk_data(bulk, memhandle, offset, size, dir) !=
8777 + goto unlock_error_exit;
8781 + vchiq_log_info(vchiq_core_log_level,
8782 + "%d: bt (%d->%d) %cx %x@%x %x",
8784 + service->localport, service->remoteport, dir_char,
8785 + size, (unsigned int)bulk->data, (unsigned int)userdata);
8787 + if (state->is_master) {
8788 + queue->local_insert++;
8789 + if (resolve_bulks(service, queue))
8790 + request_poll(state, service,
8791 + (dir == VCHIQ_BULK_TRANSMIT) ?
8792 + VCHIQ_POLL_TXNOTIFY : VCHIQ_POLL_RXNOTIFY);
8794 + int payload[2] = { (int)bulk->data, bulk->size };
8795 + VCHIQ_ELEMENT_T element = { payload, sizeof(payload) };
8797 + status = queue_message(state, NULL,
8798 + VCHIQ_MAKE_MSG(dir_msgtype,
8799 + service->localport, service->remoteport),
8800 + &element, 1, sizeof(payload), 1);
8801 + if (status != VCHIQ_SUCCESS) {
8802 + vchiq_complete_bulk(bulk);
8803 + goto unlock_error_exit;
8805 + queue->local_insert++;
8808 + mutex_unlock(&service->bulk_mutex);
8810 + vchiq_log_trace(vchiq_core_log_level,
8811 + "%d: bt:%d %cx li=%x ri=%x p=%x",
8813 + service->localport, dir_char,
8814 + queue->local_insert, queue->remote_insert, queue->process);
8817 + unlock_service(service);
8819 + status = VCHIQ_SUCCESS;
8821 + if (bulk_waiter) {
8822 + bulk_waiter->bulk = bulk;
8823 + if (down_interruptible(&bulk_waiter->event) != 0)
8824 + status = VCHIQ_RETRY;
8825 + else if (bulk_waiter->actual == VCHIQ_BULK_ACTUAL_ABORTED)
8826 + status = VCHIQ_ERROR;
8832 + mutex_unlock(&service->bulk_mutex);
8836 + unlock_service(service);
8841 +vchiq_queue_message(VCHIQ_SERVICE_HANDLE_T handle,
8842 + const VCHIQ_ELEMENT_T *elements, unsigned int count)
8844 + VCHIQ_SERVICE_T *service = find_service_by_handle(handle);
8845 + VCHIQ_STATUS_T status = VCHIQ_ERROR;
8847 + unsigned int size = 0;
8851 + (vchiq_check_service(service) != VCHIQ_SUCCESS))
8854 + for (i = 0; i < (unsigned int)count; i++) {
8855 + if (elements[i].size) {
8856 + if (elements[i].data == NULL) {
8857 + VCHIQ_SERVICE_STATS_INC(service, error_count);
8860 + size += elements[i].size;
8864 + if (size > VCHIQ_MAX_MSG_SIZE) {
8865 + VCHIQ_SERVICE_STATS_INC(service, error_count);
8869 + switch (service->srvstate) {
8870 + case VCHIQ_SRVSTATE_OPEN:
8871 + status = queue_message(service->state, service,
8872 + VCHIQ_MAKE_MSG(VCHIQ_MSG_DATA,
8873 + service->localport,
8874 + service->remoteport),
8875 + elements, count, size, 1);
8877 + case VCHIQ_SRVSTATE_OPENSYNC:
8878 + status = queue_message_sync(service->state, service,
8879 + VCHIQ_MAKE_MSG(VCHIQ_MSG_DATA,
8880 + service->localport,
8881 + service->remoteport),
8882 + elements, count, size, 1);
8885 + status = VCHIQ_ERROR;
8891 + unlock_service(service);
8897 +vchiq_release_message(VCHIQ_SERVICE_HANDLE_T handle, VCHIQ_HEADER_T *header)
8899 + VCHIQ_SERVICE_T *service = find_service_by_handle(handle);
8900 + VCHIQ_SHARED_STATE_T *remote;
8901 + VCHIQ_STATE_T *state;
8907 + state = service->state;
8908 + remote = state->remote;
8910 + slot_index = SLOT_INDEX_FROM_DATA(state, (void *)header);
8912 + if ((slot_index >= remote->slot_first) &&
8913 + (slot_index <= remote->slot_last)) {
8914 + int msgid = header->msgid;
8915 + if (msgid & VCHIQ_MSGID_CLAIMED) {
8916 + VCHIQ_SLOT_INFO_T *slot_info =
8917 + SLOT_INFO_FROM_INDEX(state, slot_index);
8919 + release_slot(state, slot_info, header, service);
8921 + } else if (slot_index == remote->slot_sync)
8922 + release_message_sync(state, header);
8924 + unlock_service(service);
8928 +release_message_sync(VCHIQ_STATE_T *state, VCHIQ_HEADER_T *header)
8930 + header->msgid = VCHIQ_MSGID_PADDING;
8932 + remote_event_signal(&state->remote->sync_release);
8936 +vchiq_get_peer_version(VCHIQ_SERVICE_HANDLE_T handle, short *peer_version)
8938 + VCHIQ_STATUS_T status = VCHIQ_ERROR;
8939 + VCHIQ_SERVICE_T *service = find_service_by_handle(handle);
8942 + (vchiq_check_service(service) != VCHIQ_SUCCESS) ||
8945 + *peer_version = service->peer_version;
8946 + status = VCHIQ_SUCCESS;
8950 + unlock_service(service);
8955 +vchiq_get_config(VCHIQ_INSTANCE_T instance,
8956 + int config_size, VCHIQ_CONFIG_T *pconfig)
8958 + VCHIQ_CONFIG_T config;
8962 + config.max_msg_size = VCHIQ_MAX_MSG_SIZE;
8963 + config.bulk_threshold = VCHIQ_MAX_MSG_SIZE;
8964 + config.max_outstanding_bulks = VCHIQ_NUM_SERVICE_BULKS;
8965 + config.max_services = VCHIQ_MAX_SERVICES;
8966 + config.version = VCHIQ_VERSION;
8967 + config.version_min = VCHIQ_VERSION_MIN;
8969 + if (config_size > sizeof(VCHIQ_CONFIG_T))
8970 + return VCHIQ_ERROR;
8972 + memcpy(pconfig, &config,
8973 + min(config_size, (int)(sizeof(VCHIQ_CONFIG_T))));
8975 + return VCHIQ_SUCCESS;
8979 +vchiq_set_service_option(VCHIQ_SERVICE_HANDLE_T handle,
8980 + VCHIQ_SERVICE_OPTION_T option, int value)
8982 + VCHIQ_SERVICE_T *service = find_service_by_handle(handle);
8983 + VCHIQ_STATUS_T status = VCHIQ_ERROR;
8987 + case VCHIQ_SERVICE_OPTION_AUTOCLOSE:
8988 + service->auto_close = value;
8989 + status = VCHIQ_SUCCESS;
8992 + case VCHIQ_SERVICE_OPTION_SLOT_QUOTA: {
8993 + VCHIQ_SERVICE_QUOTA_T *service_quota =
8994 + &service->state->service_quotas[
8995 + service->localport];
8997 + value = service->state->default_slot_quota;
8998 + if ((value >= service_quota->slot_use_count) &&
8999 + (value < (unsigned short)~0)) {
9000 + service_quota->slot_quota = value;
9001 + if ((value >= service_quota->slot_use_count) &&
9002 + (service_quota->message_quota >=
9003 + service_quota->message_use_count)) {
9004 + /* Signal the service that it may have
9005 + ** dropped below its quota */
9006 + up(&service_quota->quota_event);
9008 + status = VCHIQ_SUCCESS;
9012 + case VCHIQ_SERVICE_OPTION_MESSAGE_QUOTA: {
9013 + VCHIQ_SERVICE_QUOTA_T *service_quota =
9014 + &service->state->service_quotas[
9015 + service->localport];
9017 + value = service->state->default_message_quota;
9018 + if ((value >= service_quota->message_use_count) &&
9019 + (value < (unsigned short)~0)) {
9020 + service_quota->message_quota = value;
9022 + service_quota->message_use_count) &&
9023 + (service_quota->slot_quota >=
9024 + service_quota->slot_use_count))
9025 + /* Signal the service that it may have
9026 + ** dropped below its quota */
9027 + up(&service_quota->quota_event);
9028 + status = VCHIQ_SUCCESS;
9032 + case VCHIQ_SERVICE_OPTION_SYNCHRONOUS:
9033 + if ((service->srvstate == VCHIQ_SRVSTATE_HIDDEN) ||
9034 + (service->srvstate ==
9035 + VCHIQ_SRVSTATE_LISTENING)) {
9036 + service->sync = value;
9037 + status = VCHIQ_SUCCESS;
9044 + unlock_service(service);
9051 +vchiq_dump_shared_state(void *dump_context, VCHIQ_STATE_T *state,
9052 + VCHIQ_SHARED_STATE_T *shared, const char *label)
9054 + static const char *const debug_names[] = {
9056 + "SLOT_HANDLER_COUNT",
9057 + "SLOT_HANDLER_LINE",
9061 + "AWAIT_COMPLETION_LINE",
9062 + "DEQUEUE_MESSAGE_LINE",
9063 + "SERVICE_CALLBACK_LINE",
9064 + "MSG_QUEUE_FULL_COUNT",
9065 + "COMPLETION_QUEUE_FULL_COUNT"
9071 + len = snprintf(buf, sizeof(buf),
9072 + " %s: slots %d-%d tx_pos=%x recycle=%x",
9073 + label, shared->slot_first, shared->slot_last,
9074 + shared->tx_pos, shared->slot_queue_recycle);
9075 + vchiq_dump(dump_context, buf, len + 1);
9077 + len = snprintf(buf, sizeof(buf),
9078 + " Slots claimed:");
9079 + vchiq_dump(dump_context, buf, len + 1);
9081 + for (i = shared->slot_first; i <= shared->slot_last; i++) {
9082 + VCHIQ_SLOT_INFO_T slot_info = *SLOT_INFO_FROM_INDEX(state, i);
9083 + if (slot_info.use_count != slot_info.release_count) {
9084 + len = snprintf(buf, sizeof(buf),
9085 + " %d: %d/%d", i, slot_info.use_count,
9086 + slot_info.release_count);
9087 + vchiq_dump(dump_context, buf, len + 1);
9091 + for (i = 1; i < shared->debug[DEBUG_ENTRIES]; i++) {
9092 + len = snprintf(buf, sizeof(buf), " DEBUG: %s = %d(%x)",
9093 + debug_names[i], shared->debug[i], shared->debug[i]);
9094 + vchiq_dump(dump_context, buf, len + 1);
9099 +vchiq_dump_state(void *dump_context, VCHIQ_STATE_T *state)
9105 + len = snprintf(buf, sizeof(buf), "State %d: %s", state->id,
9106 + conn_state_names[state->conn_state]);
9107 + vchiq_dump(dump_context, buf, len + 1);
9109 + len = snprintf(buf, sizeof(buf),
9110 + " tx_pos=%x(@%x), rx_pos=%x(@%x)",
9111 + state->local->tx_pos,
9112 + (uint32_t)state->tx_data +
9113 + (state->local_tx_pos & VCHIQ_SLOT_MASK),
9115 + (uint32_t)state->rx_data +
9116 + (state->rx_pos & VCHIQ_SLOT_MASK));
9117 + vchiq_dump(dump_context, buf, len + 1);
9119 + len = snprintf(buf, sizeof(buf),
9120 + " Version: %d (min %d)",
9121 + VCHIQ_VERSION, VCHIQ_VERSION_MIN);
9122 + vchiq_dump(dump_context, buf, len + 1);
9124 + if (VCHIQ_ENABLE_STATS) {
9125 + len = snprintf(buf, sizeof(buf),
9126 + " Stats: ctrl_tx_count=%d, ctrl_rx_count=%d, "
9128 + state->stats.ctrl_tx_count, state->stats.ctrl_rx_count,
9129 + state->stats.error_count);
9130 + vchiq_dump(dump_context, buf, len + 1);
9133 + len = snprintf(buf, sizeof(buf),
9134 + " Slots: %d available (%d data), %d recyclable, %d stalls "
9136 + ((state->slot_queue_available * VCHIQ_SLOT_SIZE) -
9137 + state->local_tx_pos) / VCHIQ_SLOT_SIZE,
9138 + state->data_quota - state->data_use_count,
9139 + state->local->slot_queue_recycle - state->slot_queue_available,
9140 + state->stats.slot_stalls, state->stats.data_stalls);
9141 + vchiq_dump(dump_context, buf, len + 1);
9143 + vchiq_dump_platform_state(dump_context);
9145 + vchiq_dump_shared_state(dump_context, state, state->local, "Local");
9146 + vchiq_dump_shared_state(dump_context, state, state->remote, "Remote");
9148 + vchiq_dump_platform_instances(dump_context);
9150 + for (i = 0; i < state->unused_service; i++) {
9151 + VCHIQ_SERVICE_T *service = find_service_by_port(state, i);
9154 + vchiq_dump_service_state(dump_context, service);
9155 + unlock_service(service);
9161 +vchiq_dump_service_state(void *dump_context, VCHIQ_SERVICE_T *service)
9166 + len = snprintf(buf, sizeof(buf), "Service %d: %s (ref %u)",
9167 + service->localport, srvstate_names[service->srvstate],
9168 + service->ref_count - 1); /*Don't include the lock just taken*/
9170 + if (service->srvstate != VCHIQ_SRVSTATE_FREE) {
9171 + char remoteport[30];
9172 + VCHIQ_SERVICE_QUOTA_T *service_quota =
9173 + &service->state->service_quotas[service->localport];
9174 + int fourcc = service->base.fourcc;
9175 + int tx_pending, rx_pending;
9176 + if (service->remoteport != VCHIQ_PORT_FREE) {
9177 + int len2 = snprintf(remoteport, sizeof(remoteport),
9178 + "%d", service->remoteport);
9179 + if (service->public_fourcc != VCHIQ_FOURCC_INVALID)
9180 + snprintf(remoteport + len2,
9181 + sizeof(remoteport) - len2,
9182 + " (client %x)", service->client_id);
9184 + strcpy(remoteport, "n/a");
9186 + len += snprintf(buf + len, sizeof(buf) - len,
9187 + " '%c%c%c%c' remote %s (msg use %d/%d, slot use %d/%d)",
9188 + VCHIQ_FOURCC_AS_4CHARS(fourcc),
9190 + service_quota->message_use_count,
9191 + service_quota->message_quota,
9192 + service_quota->slot_use_count,
9193 + service_quota->slot_quota);
9195 + vchiq_dump(dump_context, buf, len + 1);
9197 + tx_pending = service->bulk_tx.local_insert -
9198 + service->bulk_tx.remote_insert;
9200 + rx_pending = service->bulk_rx.local_insert -
9201 + service->bulk_rx.remote_insert;
9203 + len = snprintf(buf, sizeof(buf),
9204 + " Bulk: tx_pending=%d (size %d),"
9205 + " rx_pending=%d (size %d)",
9207 + tx_pending ? service->bulk_tx.bulks[
9208 + BULK_INDEX(service->bulk_tx.remove)].size : 0,
9210 + rx_pending ? service->bulk_rx.bulks[
9211 + BULK_INDEX(service->bulk_rx.remove)].size : 0);
9213 + if (VCHIQ_ENABLE_STATS) {
9214 + vchiq_dump(dump_context, buf, len + 1);
9216 + len = snprintf(buf, sizeof(buf),
9217 + " Ctrl: tx_count=%d, tx_bytes=%llu, "
9218 + "rx_count=%d, rx_bytes=%llu",
9219 + service->stats.ctrl_tx_count,
9220 + service->stats.ctrl_tx_bytes,
9221 + service->stats.ctrl_rx_count,
9222 + service->stats.ctrl_rx_bytes);
9223 + vchiq_dump(dump_context, buf, len + 1);
9225 + len = snprintf(buf, sizeof(buf),
9226 + " Bulk: tx_count=%d, tx_bytes=%llu, "
9227 + "rx_count=%d, rx_bytes=%llu",
9228 + service->stats.bulk_tx_count,
9229 + service->stats.bulk_tx_bytes,
9230 + service->stats.bulk_rx_count,
9231 + service->stats.bulk_rx_bytes);
9232 + vchiq_dump(dump_context, buf, len + 1);
9234 + len = snprintf(buf, sizeof(buf),
9235 + " %d quota stalls, %d slot stalls, "
9236 + "%d bulk stalls, %d aborted, %d errors",
9237 + service->stats.quota_stalls,
9238 + service->stats.slot_stalls,
9239 + service->stats.bulk_stalls,
9240 + service->stats.bulk_aborted_count,
9241 + service->stats.error_count);
9245 + vchiq_dump(dump_context, buf, len + 1);
9247 + if (service->srvstate != VCHIQ_SRVSTATE_FREE)
9248 + vchiq_dump_platform_service_state(dump_context, service);
9253 +vchiq_loud_error_header(void)
9255 + vchiq_log_error(vchiq_core_log_level,
9256 + "============================================================"
9257 + "================");
9258 + vchiq_log_error(vchiq_core_log_level,
9259 + "============================================================"
9260 + "================");
9261 + vchiq_log_error(vchiq_core_log_level, "=====");
9265 +vchiq_loud_error_footer(void)
9267 + vchiq_log_error(vchiq_core_log_level, "=====");
9268 + vchiq_log_error(vchiq_core_log_level,
9269 + "============================================================"
9270 + "================");
9271 + vchiq_log_error(vchiq_core_log_level,
9272 + "============================================================"
9273 + "================");
9277 +VCHIQ_STATUS_T vchiq_send_remote_use(VCHIQ_STATE_T *state)
9279 + VCHIQ_STATUS_T status = VCHIQ_RETRY;
9280 + if (state->conn_state != VCHIQ_CONNSTATE_DISCONNECTED)
9281 + status = queue_message(state, NULL,
9282 + VCHIQ_MAKE_MSG(VCHIQ_MSG_REMOTE_USE, 0, 0),
9287 +VCHIQ_STATUS_T vchiq_send_remote_release(VCHIQ_STATE_T *state)
9289 + VCHIQ_STATUS_T status = VCHIQ_RETRY;
9290 + if (state->conn_state != VCHIQ_CONNSTATE_DISCONNECTED)
9291 + status = queue_message(state, NULL,
9292 + VCHIQ_MAKE_MSG(VCHIQ_MSG_REMOTE_RELEASE, 0, 0),
9297 +VCHIQ_STATUS_T vchiq_send_remote_use_active(VCHIQ_STATE_T *state)
9299 + VCHIQ_STATUS_T status = VCHIQ_RETRY;
9300 + if (state->conn_state != VCHIQ_CONNSTATE_DISCONNECTED)
9301 + status = queue_message(state, NULL,
9302 + VCHIQ_MAKE_MSG(VCHIQ_MSG_REMOTE_USE_ACTIVE, 0, 0),
9307 +void vchiq_log_dump_mem(const char *label, uint32_t addr, const void *voidMem,
9310 + const uint8_t *mem = (const uint8_t *)voidMem;
9312 + char lineBuf[100];
9315 + while (numBytes > 0) {
9318 + for (offset = 0; offset < 16; offset++) {
9319 + if (offset < numBytes)
9320 + s += snprintf(s, 4, "%02x ", mem[offset]);
9322 + s += snprintf(s, 4, " ");
9325 + for (offset = 0; offset < 16; offset++) {
9326 + if (offset < numBytes) {
9327 + uint8_t ch = mem[offset];
9329 + if ((ch < ' ') || (ch > '~'))
9336 + if ((label != NULL) && (*label != '\0'))
9337 + vchiq_log_trace(VCHIQ_LOG_TRACE,
9338 + "%s: %08x: %s", label, addr, lineBuf);
9340 + vchiq_log_trace(VCHIQ_LOG_TRACE,
9341 + "%08x: %s", addr, lineBuf);
9345 + if (numBytes > 16)
9352 +++ b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_core.h
9355 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
9357 + * Redistribution and use in source and binary forms, with or without
9358 + * modification, are permitted provided that the following conditions
9360 + * 1. Redistributions of source code must retain the above copyright
9361 + * notice, this list of conditions, and the following disclaimer,
9362 + * without modification.
9363 + * 2. Redistributions in binary form must reproduce the above copyright
9364 + * notice, this list of conditions and the following disclaimer in the
9365 + * documentation and/or other materials provided with the distribution.
9366 + * 3. The names of the above-listed copyright holders may not be used
9367 + * to endorse or promote products derived from this software without
9368 + * specific prior written permission.
9370 + * ALTERNATIVELY, this software may be distributed under the terms of the
9371 + * GNU General Public License ("GPL") version 2, as published by the Free
9372 + * Software Foundation.
9374 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
9375 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
9376 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
9377 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
9378 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
9379 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
9380 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
9381 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
9382 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
9383 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
9384 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
9387 +#ifndef VCHIQ_CORE_H
9388 +#define VCHIQ_CORE_H
9390 +#include <linux/mutex.h>
9391 +#include <linux/semaphore.h>
9392 +#include <linux/kthread.h>
9394 +#include "vchiq_cfg.h"
9398 +/* Run time control of log level, based on KERN_XXX level. */
9399 +#define VCHIQ_LOG_DEFAULT 4
9400 +#define VCHIQ_LOG_ERROR 3
9401 +#define VCHIQ_LOG_WARNING 4
9402 +#define VCHIQ_LOG_INFO 6
9403 +#define VCHIQ_LOG_TRACE 7
9405 +#define VCHIQ_LOG_PREFIX KERN_INFO "vchiq: "
9407 +#ifndef vchiq_log_error
9408 +#define vchiq_log_error(cat, fmt, ...) \
9409 + do { if (cat >= VCHIQ_LOG_ERROR) \
9410 + printk(VCHIQ_LOG_PREFIX fmt "\n", ##__VA_ARGS__); } while (0)
9412 +#ifndef vchiq_log_warning
9413 +#define vchiq_log_warning(cat, fmt, ...) \
9414 + do { if (cat >= VCHIQ_LOG_WARNING) \
9415 + printk(VCHIQ_LOG_PREFIX fmt "\n", ##__VA_ARGS__); } while (0)
9417 +#ifndef vchiq_log_info
9418 +#define vchiq_log_info(cat, fmt, ...) \
9419 + do { if (cat >= VCHIQ_LOG_INFO) \
9420 + printk(VCHIQ_LOG_PREFIX fmt "\n", ##__VA_ARGS__); } while (0)
9422 +#ifndef vchiq_log_trace
9423 +#define vchiq_log_trace(cat, fmt, ...) \
9424 + do { if (cat >= VCHIQ_LOG_TRACE) \
9425 + printk(VCHIQ_LOG_PREFIX fmt "\n", ##__VA_ARGS__); } while (0)
9428 +#define vchiq_loud_error(...) \
9429 + vchiq_log_error(vchiq_core_log_level, "===== " __VA_ARGS__)
9431 +#ifndef vchiq_static_assert
9432 +#define vchiq_static_assert(cond) __attribute__((unused)) \
9433 + extern int vchiq_static_assert[(cond) ? 1 : -1]
9436 +#define IS_POW2(x) (x && ((x & (x - 1)) == 0))
9438 +/* Ensure that the slot size and maximum number of slots are powers of 2 */
9439 +vchiq_static_assert(IS_POW2(VCHIQ_SLOT_SIZE));
9440 +vchiq_static_assert(IS_POW2(VCHIQ_MAX_SLOTS));
9441 +vchiq_static_assert(IS_POW2(VCHIQ_MAX_SLOTS_PER_SIDE));
9443 +#define VCHIQ_SLOT_MASK (VCHIQ_SLOT_SIZE - 1)
9444 +#define VCHIQ_SLOT_QUEUE_MASK (VCHIQ_MAX_SLOTS_PER_SIDE - 1)
9445 +#define VCHIQ_SLOT_ZERO_SLOTS ((sizeof(VCHIQ_SLOT_ZERO_T) + \
9446 + VCHIQ_SLOT_SIZE - 1) / VCHIQ_SLOT_SIZE)
9448 +#define VCHIQ_MSG_PADDING 0 /* - */
9449 +#define VCHIQ_MSG_CONNECT 1 /* - */
9450 +#define VCHIQ_MSG_OPEN 2 /* + (srcport, -), fourcc, client_id */
9451 +#define VCHIQ_MSG_OPENACK 3 /* + (srcport, dstport) */
9452 +#define VCHIQ_MSG_CLOSE 4 /* + (srcport, dstport) */
9453 +#define VCHIQ_MSG_DATA 5 /* + (srcport, dstport) */
9454 +#define VCHIQ_MSG_BULK_RX 6 /* + (srcport, dstport), data, size */
9455 +#define VCHIQ_MSG_BULK_TX 7 /* + (srcport, dstport), data, size */
9456 +#define VCHIQ_MSG_BULK_RX_DONE 8 /* + (srcport, dstport), actual */
9457 +#define VCHIQ_MSG_BULK_TX_DONE 9 /* + (srcport, dstport), actual */
9458 +#define VCHIQ_MSG_PAUSE 10 /* - */
9459 +#define VCHIQ_MSG_RESUME 11 /* - */
9460 +#define VCHIQ_MSG_REMOTE_USE 12 /* - */
9461 +#define VCHIQ_MSG_REMOTE_RELEASE 13 /* - */
9462 +#define VCHIQ_MSG_REMOTE_USE_ACTIVE 14 /* - */
9464 +#define VCHIQ_PORT_MAX (VCHIQ_MAX_SERVICES - 1)
9465 +#define VCHIQ_PORT_FREE 0x1000
9466 +#define VCHIQ_PORT_IS_VALID(port) (port < VCHIQ_PORT_FREE)
9467 +#define VCHIQ_MAKE_MSG(type, srcport, dstport) \
9468 + ((type<<24) | (srcport<<12) | (dstport<<0))
9469 +#define VCHIQ_MSG_TYPE(msgid) ((unsigned int)msgid >> 24)
9470 +#define VCHIQ_MSG_SRCPORT(msgid) \
9471 + (unsigned short)(((unsigned int)msgid >> 12) & 0xfff)
9472 +#define VCHIQ_MSG_DSTPORT(msgid) \
9473 + ((unsigned short)msgid & 0xfff)
9475 +#define VCHIQ_FOURCC_AS_4CHARS(fourcc) \
9476 + ((fourcc) >> 24) & 0xff, \
9477 + ((fourcc) >> 16) & 0xff, \
9478 + ((fourcc) >> 8) & 0xff, \
9481 +/* Ensure the fields are wide enough */
9482 +vchiq_static_assert(VCHIQ_MSG_SRCPORT(VCHIQ_MAKE_MSG(0, 0, VCHIQ_PORT_MAX))
9484 +vchiq_static_assert(VCHIQ_MSG_TYPE(VCHIQ_MAKE_MSG(0, VCHIQ_PORT_MAX, 0)) == 0);
9485 +vchiq_static_assert((unsigned int)VCHIQ_PORT_MAX <
9486 + (unsigned int)VCHIQ_PORT_FREE);
9488 +#define VCHIQ_MSGID_PADDING VCHIQ_MAKE_MSG(VCHIQ_MSG_PADDING, 0, 0)
9489 +#define VCHIQ_MSGID_CLAIMED 0x40000000
9491 +#define VCHIQ_FOURCC_INVALID 0x00000000
9492 +#define VCHIQ_FOURCC_IS_LEGAL(fourcc) (fourcc != VCHIQ_FOURCC_INVALID)
9494 +#define VCHIQ_BULK_ACTUAL_ABORTED -1
9496 +typedef uint32_t BITSET_T;
9498 +vchiq_static_assert((sizeof(BITSET_T) * 8) == 32);
9500 +#define BITSET_SIZE(b) ((b + 31) >> 5)
9501 +#define BITSET_WORD(b) (b >> 5)
9502 +#define BITSET_BIT(b) (1 << (b & 31))
9503 +#define BITSET_ZERO(bs) memset(bs, 0, sizeof(bs))
9504 +#define BITSET_IS_SET(bs, b) (bs[BITSET_WORD(b)] & BITSET_BIT(b))
9505 +#define BITSET_SET(bs, b) (bs[BITSET_WORD(b)] |= BITSET_BIT(b))
9506 +#define BITSET_CLR(bs, b) (bs[BITSET_WORD(b)] &= ~BITSET_BIT(b))
9508 +#if VCHIQ_ENABLE_STATS
9509 +#define VCHIQ_STATS_INC(state, stat) (state->stats. stat++)
9510 +#define VCHIQ_SERVICE_STATS_INC(service, stat) (service->stats. stat++)
9511 +#define VCHIQ_SERVICE_STATS_ADD(service, stat, addend) \
9512 + (service->stats. stat += addend)
9514 +#define VCHIQ_STATS_INC(state, stat) ((void)0)
9515 +#define VCHIQ_SERVICE_STATS_INC(service, stat) ((void)0)
9516 +#define VCHIQ_SERVICE_STATS_ADD(service, stat, addend) ((void)0)
9521 +#if VCHIQ_ENABLE_DEBUG
9522 + DEBUG_SLOT_HANDLER_COUNT,
9523 + DEBUG_SLOT_HANDLER_LINE,
9525 + DEBUG_PARSE_HEADER,
9526 + DEBUG_PARSE_MSGID,
9527 + DEBUG_AWAIT_COMPLETION_LINE,
9528 + DEBUG_DEQUEUE_MESSAGE_LINE,
9529 + DEBUG_SERVICE_CALLBACK_LINE,
9530 + DEBUG_MSG_QUEUE_FULL_COUNT,
9531 + DEBUG_COMPLETION_QUEUE_FULL_COUNT,
9536 +#if VCHIQ_ENABLE_DEBUG
9538 +#define DEBUG_INITIALISE(local) int *debug_ptr = (local)->debug;
9539 +#define DEBUG_TRACE(d) \
9540 + do { debug_ptr[DEBUG_ ## d] = __LINE__; dsb(); } while (0)
9541 +#define DEBUG_VALUE(d, v) \
9542 + do { debug_ptr[DEBUG_ ## d] = (v); dsb(); } while (0)
9543 +#define DEBUG_COUNT(d) \
9544 + do { debug_ptr[DEBUG_ ## d]++; dsb(); } while (0)
9546 +#else /* VCHIQ_ENABLE_DEBUG */
9548 +#define DEBUG_INITIALISE(local)
9549 +#define DEBUG_TRACE(d)
9550 +#define DEBUG_VALUE(d, v)
9551 +#define DEBUG_COUNT(d)
9553 +#endif /* VCHIQ_ENABLE_DEBUG */
9556 + VCHIQ_CONNSTATE_DISCONNECTED,
9557 + VCHIQ_CONNSTATE_CONNECTING,
9558 + VCHIQ_CONNSTATE_CONNECTED,
9559 + VCHIQ_CONNSTATE_PAUSING,
9560 + VCHIQ_CONNSTATE_PAUSE_SENT,
9561 + VCHIQ_CONNSTATE_PAUSED,
9562 + VCHIQ_CONNSTATE_RESUMING,
9563 + VCHIQ_CONNSTATE_PAUSE_TIMEOUT,
9564 + VCHIQ_CONNSTATE_RESUME_TIMEOUT
9565 +} VCHIQ_CONNSTATE_T;
9568 + VCHIQ_SRVSTATE_FREE,
9569 + VCHIQ_SRVSTATE_HIDDEN,
9570 + VCHIQ_SRVSTATE_LISTENING,
9571 + VCHIQ_SRVSTATE_OPENING,
9572 + VCHIQ_SRVSTATE_OPEN,
9573 + VCHIQ_SRVSTATE_OPENSYNC,
9574 + VCHIQ_SRVSTATE_CLOSESENT,
9575 + VCHIQ_SRVSTATE_CLOSERECVD,
9576 + VCHIQ_SRVSTATE_CLOSEWAIT,
9577 + VCHIQ_SRVSTATE_CLOSED
9581 + VCHIQ_POLL_TERMINATE,
9582 + VCHIQ_POLL_REMOVE,
9583 + VCHIQ_POLL_TXNOTIFY,
9584 + VCHIQ_POLL_RXNOTIFY,
9589 + VCHIQ_BULK_TRANSMIT,
9590 + VCHIQ_BULK_RECEIVE
9591 +} VCHIQ_BULK_DIR_T;
9593 +typedef void (*VCHIQ_USERDATA_TERM_T)(void *userdata);
9595 +typedef struct vchiq_bulk_struct {
9599 + VCHI_MEM_HANDLE_T handle;
9602 + void *remote_data;
9607 +typedef struct vchiq_bulk_queue_struct {
9608 + int local_insert; /* Where to insert the next local bulk */
9609 + int remote_insert; /* Where to insert the next remote bulk (master) */
9610 + int process; /* Bulk to transfer next */
9611 + int remote_notify; /* Bulk to notify the remote client of next (mstr) */
9612 + int remove; /* Bulk to notify the local client of, and remove,
9614 + VCHIQ_BULK_T bulks[VCHIQ_NUM_SERVICE_BULKS];
9615 +} VCHIQ_BULK_QUEUE_T;
9617 +typedef struct remote_event_struct {
9620 + struct semaphore *event;
9623 +typedef struct opaque_platform_state_t *VCHIQ_PLATFORM_STATE_T;
9625 +typedef struct vchiq_state_struct VCHIQ_STATE_T;
9627 +typedef struct vchiq_slot_struct {
9628 + char data[VCHIQ_SLOT_SIZE];
9631 +typedef struct vchiq_slot_info_struct {
9632 + /* Use two counters rather than one to avoid the need for a mutex. */
9634 + short release_count;
9635 +} VCHIQ_SLOT_INFO_T;
9637 +typedef struct vchiq_service_struct {
9638 + VCHIQ_SERVICE_BASE_T base;
9639 + VCHIQ_SERVICE_HANDLE_T handle;
9640 + unsigned int ref_count;
9642 + VCHIQ_USERDATA_TERM_T userdata_term;
9643 + unsigned int localport;
9644 + unsigned int remoteport;
9645 + int public_fourcc;
9650 + atomic_t poll_flags;
9652 + short version_min;
9653 + short peer_version;
9655 + VCHIQ_STATE_T *state;
9656 + VCHIQ_INSTANCE_T instance;
9658 + int service_use_count;
9660 + VCHIQ_BULK_QUEUE_T bulk_tx;
9661 + VCHIQ_BULK_QUEUE_T bulk_rx;
9663 + struct semaphore remove_event;
9664 + struct semaphore bulk_remove_event;
9665 + struct mutex bulk_mutex;
9667 + struct service_stats_struct {
9672 + int ctrl_tx_count;
9673 + int ctrl_rx_count;
9674 + int bulk_tx_count;
9675 + int bulk_rx_count;
9676 + int bulk_aborted_count;
9677 + uint64_t ctrl_tx_bytes;
9678 + uint64_t ctrl_rx_bytes;
9679 + uint64_t bulk_tx_bytes;
9680 + uint64_t bulk_rx_bytes;
9684 +/* The quota information is outside VCHIQ_SERVICE_T so that it can be
9685 + statically allocated, since for accounting reasons a service's slot
9686 + usage is carried over between users of the same port number.
9688 +typedef struct vchiq_service_quota_struct {
9689 + unsigned short slot_quota;
9690 + unsigned short slot_use_count;
9691 + unsigned short message_quota;
9692 + unsigned short message_use_count;
9693 + struct semaphore quota_event;
9694 + int previous_tx_index;
9695 +} VCHIQ_SERVICE_QUOTA_T;
9697 +typedef struct vchiq_shared_state_struct {
9699 + /* A non-zero value here indicates that the content is valid. */
9702 + /* The first and last (inclusive) slots allocated to the owner. */
9706 + /* The slot allocated to synchronous messages from the owner. */
9709 + /* Signalling this event indicates that owner's slot handler thread
9711 + REMOTE_EVENT_T trigger;
9713 + /* Indicates the byte position within the stream where the next message
9714 + ** will be written. The least significant bits are an index into the
9715 + ** slot. The next bits are the index of the slot in slot_queue. */
9718 + /* This event should be signalled when a slot is recycled. */
9719 + REMOTE_EVENT_T recycle;
9721 + /* The slot_queue index where the next recycled slot will be written. */
9722 + int slot_queue_recycle;
9724 + /* This event should be signalled when a synchronous message is sent. */
9725 + REMOTE_EVENT_T sync_trigger;
9727 + /* This event should be signalled when a synchronous message has been
9729 + REMOTE_EVENT_T sync_release;
9731 + /* A circular buffer of slot indexes. */
9732 + int slot_queue[VCHIQ_MAX_SLOTS_PER_SIDE];
9734 + /* Debugging state */
9735 + int debug[DEBUG_MAX];
9736 +} VCHIQ_SHARED_STATE_T;
9738 +typedef struct vchiq_slot_zero_struct {
9741 + short version_min;
9742 + int slot_zero_size;
9745 + int max_slots_per_side;
9746 + int platform_data[2];
9747 + VCHIQ_SHARED_STATE_T master;
9748 + VCHIQ_SHARED_STATE_T slave;
9749 + VCHIQ_SLOT_INFO_T slots[VCHIQ_MAX_SLOTS];
9750 +} VCHIQ_SLOT_ZERO_T;
9752 +struct vchiq_state_struct {
9755 + VCHIQ_CONNSTATE_T conn_state;
9758 + VCHIQ_SHARED_STATE_T *local;
9759 + VCHIQ_SHARED_STATE_T *remote;
9760 + VCHIQ_SLOT_T *slot_data;
9762 + unsigned short default_slot_quota;
9763 + unsigned short default_message_quota;
9765 + /* Event indicating connect message received */
9766 + struct semaphore connect;
9768 + /* Mutex protecting services */
9769 + struct mutex mutex;
9770 + VCHIQ_INSTANCE_T *instance;
9772 + /* Processes incoming messages */
9773 + struct task_struct *slot_handler_thread;
9775 + /* Processes recycled slots */
9776 + struct task_struct *recycle_thread;
9778 + /* Processes synchronous messages */
9779 + struct task_struct *sync_thread;
9781 + /* Local implementation of the trigger remote event */
9782 + struct semaphore trigger_event;
9784 + /* Local implementation of the recycle remote event */
9785 + struct semaphore recycle_event;
9787 + /* Local implementation of the sync trigger remote event */
9788 + struct semaphore sync_trigger_event;
9790 + /* Local implementation of the sync release remote event */
9791 + struct semaphore sync_release_event;
9795 + VCHIQ_SLOT_INFO_T *rx_info;
9797 + struct mutex slot_mutex;
9799 + struct mutex recycle_mutex;
9801 + struct mutex sync_mutex;
9803 + struct mutex bulk_transfer_mutex;
9805 + /* Indicates the byte position within the stream from where the next
9806 + ** message will be read. The least significant bits are an index into
9807 + ** the slot.The next bits are the index of the slot in
9808 + ** remote->slot_queue. */
9811 + /* A cached copy of local->tx_pos. Only write to local->tx_pos, and read
9812 + from remote->tx_pos. */
9815 + /* The slot_queue index of the slot to become available next. */
9816 + int slot_queue_available;
9818 + /* A flag to indicate if any poll has been requested */
9821 + /* Ths index of the previous slot used for data messages. */
9822 + int previous_data_index;
9824 + /* The number of slots occupied by data messages. */
9825 + unsigned short data_use_count;
9827 + /* The maximum number of slots to be occupied by data messages. */
9828 + unsigned short data_quota;
9830 + /* An array of bit sets indicating which services must be polled. */
9831 + atomic_t poll_services[BITSET_SIZE(VCHIQ_MAX_SERVICES)];
9833 + /* The number of the first unused service */
9834 + int unused_service;
9836 + /* Signalled when a free slot becomes available. */
9837 + struct semaphore slot_available_event;
9839 + struct semaphore slot_remove_event;
9841 + /* Signalled when a free data slot becomes available. */
9842 + struct semaphore data_quota_event;
9844 + /* Incremented when there are bulk transfers which cannot be processed
9845 + * whilst paused and must be processed on resume */
9846 + int deferred_bulks;
9848 + struct state_stats_struct {
9851 + int ctrl_tx_count;
9852 + int ctrl_rx_count;
9856 + VCHIQ_SERVICE_T * services[VCHIQ_MAX_SERVICES];
9857 + VCHIQ_SERVICE_QUOTA_T service_quotas[VCHIQ_MAX_SERVICES];
9858 + VCHIQ_SLOT_INFO_T slot_info[VCHIQ_MAX_SLOTS];
9860 + VCHIQ_PLATFORM_STATE_T platform_state;
9863 +struct bulk_waiter {
9864 + VCHIQ_BULK_T *bulk;
9865 + struct semaphore event;
9869 +extern spinlock_t bulk_waiter_spinlock;
9871 +extern int vchiq_core_log_level;
9872 +extern int vchiq_core_msg_log_level;
9873 +extern int vchiq_sync_log_level;
9875 +extern VCHIQ_STATE_T *vchiq_states[VCHIQ_MAX_STATES];
9877 +extern const char *
9878 +get_conn_state_name(VCHIQ_CONNSTATE_T conn_state);
9880 +extern VCHIQ_SLOT_ZERO_T *
9881 +vchiq_init_slots(void *mem_base, int mem_size);
9883 +extern VCHIQ_STATUS_T
9884 +vchiq_init_state(VCHIQ_STATE_T *state, VCHIQ_SLOT_ZERO_T *slot_zero,
9887 +extern VCHIQ_STATUS_T
9888 +vchiq_connect_internal(VCHIQ_STATE_T *state, VCHIQ_INSTANCE_T instance);
9890 +extern VCHIQ_SERVICE_T *
9891 +vchiq_add_service_internal(VCHIQ_STATE_T *state,
9892 + const VCHIQ_SERVICE_PARAMS_T *params, int srvstate,
9893 + VCHIQ_INSTANCE_T instance, VCHIQ_USERDATA_TERM_T userdata_term);
9895 +extern VCHIQ_STATUS_T
9896 +vchiq_open_service_internal(VCHIQ_SERVICE_T *service, int client_id);
9898 +extern VCHIQ_STATUS_T
9899 +vchiq_close_service_internal(VCHIQ_SERVICE_T *service, int close_recvd);
9902 +vchiq_terminate_service_internal(VCHIQ_SERVICE_T *service);
9905 +vchiq_free_service_internal(VCHIQ_SERVICE_T *service);
9907 +extern VCHIQ_STATUS_T
9908 +vchiq_shutdown_internal(VCHIQ_STATE_T *state, VCHIQ_INSTANCE_T instance);
9910 +extern VCHIQ_STATUS_T
9911 +vchiq_pause_internal(VCHIQ_STATE_T *state);
9913 +extern VCHIQ_STATUS_T
9914 +vchiq_resume_internal(VCHIQ_STATE_T *state);
9917 +remote_event_pollall(VCHIQ_STATE_T *state);
9919 +extern VCHIQ_STATUS_T
9920 +vchiq_bulk_transfer(VCHIQ_SERVICE_HANDLE_T handle,
9921 + VCHI_MEM_HANDLE_T memhandle, void *offset, int size, void *userdata,
9922 + VCHIQ_BULK_MODE_T mode, VCHIQ_BULK_DIR_T dir);
9925 +vchiq_dump_state(void *dump_context, VCHIQ_STATE_T *state);
9928 +vchiq_dump_service_state(void *dump_context, VCHIQ_SERVICE_T *service);
9931 +vchiq_loud_error_header(void);
9934 +vchiq_loud_error_footer(void);
9937 +request_poll(VCHIQ_STATE_T *state, VCHIQ_SERVICE_T *service, int poll_type);
9939 +static inline VCHIQ_SERVICE_T *
9940 +handle_to_service(VCHIQ_SERVICE_HANDLE_T handle)
9942 + VCHIQ_STATE_T *state = vchiq_states[(handle / VCHIQ_MAX_SERVICES) &
9943 + (VCHIQ_MAX_STATES - 1)];
9947 + return state->services[handle & (VCHIQ_MAX_SERVICES - 1)];
9950 +extern VCHIQ_SERVICE_T *
9951 +find_service_by_handle(VCHIQ_SERVICE_HANDLE_T handle);
9953 +extern VCHIQ_SERVICE_T *
9954 +find_service_by_port(VCHIQ_STATE_T *state, int localport);
9956 +extern VCHIQ_SERVICE_T *
9957 +find_service_for_instance(VCHIQ_INSTANCE_T instance,
9958 + VCHIQ_SERVICE_HANDLE_T handle);
9960 +extern VCHIQ_SERVICE_T *
9961 +next_service_by_instance(VCHIQ_STATE_T *state, VCHIQ_INSTANCE_T instance,
9965 +lock_service(VCHIQ_SERVICE_T *service);
9968 +unlock_service(VCHIQ_SERVICE_T *service);
9970 +/* The following functions are called from vchiq_core, and external
9971 +** implementations must be provided. */
9973 +extern VCHIQ_STATUS_T
9974 +vchiq_prepare_bulk_data(VCHIQ_BULK_T *bulk,
9975 + VCHI_MEM_HANDLE_T memhandle, void *offset, int size, int dir);
9978 +vchiq_transfer_bulk(VCHIQ_BULK_T *bulk);
9981 +vchiq_complete_bulk(VCHIQ_BULK_T *bulk);
9983 +extern VCHIQ_STATUS_T
9984 +vchiq_copy_from_user(void *dst, const void *src, int size);
9987 +remote_event_signal(REMOTE_EVENT_T *event);
9990 +vchiq_platform_check_suspend(VCHIQ_STATE_T *state);
9993 +vchiq_platform_paused(VCHIQ_STATE_T *state);
9995 +extern VCHIQ_STATUS_T
9996 +vchiq_platform_resume(VCHIQ_STATE_T *state);
9999 +vchiq_platform_resumed(VCHIQ_STATE_T *state);
10002 +vchiq_dump(void *dump_context, const char *str, int len);
10005 +vchiq_dump_platform_state(void *dump_context);
10008 +vchiq_dump_platform_instances(void *dump_context);
10011 +vchiq_dump_platform_service_state(void *dump_context,
10012 + VCHIQ_SERVICE_T *service);
10014 +extern VCHIQ_STATUS_T
10015 +vchiq_use_service_internal(VCHIQ_SERVICE_T *service);
10017 +extern VCHIQ_STATUS_T
10018 +vchiq_release_service_internal(VCHIQ_SERVICE_T *service);
10021 +vchiq_on_remote_use(VCHIQ_STATE_T *state);
10024 +vchiq_on_remote_release(VCHIQ_STATE_T *state);
10026 +extern VCHIQ_STATUS_T
10027 +vchiq_platform_init_state(VCHIQ_STATE_T *state);
10029 +extern VCHIQ_STATUS_T
10030 +vchiq_check_service(VCHIQ_SERVICE_T *service);
10033 +vchiq_on_remote_use_active(VCHIQ_STATE_T *state);
10035 +extern VCHIQ_STATUS_T
10036 +vchiq_send_remote_use(VCHIQ_STATE_T *state);
10038 +extern VCHIQ_STATUS_T
10039 +vchiq_send_remote_release(VCHIQ_STATE_T *state);
10041 +extern VCHIQ_STATUS_T
10042 +vchiq_send_remote_use_active(VCHIQ_STATE_T *state);
10045 +vchiq_platform_conn_state_changed(VCHIQ_STATE_T *state,
10046 + VCHIQ_CONNSTATE_T oldstate, VCHIQ_CONNSTATE_T newstate);
10049 +vchiq_platform_handle_timeout(VCHIQ_STATE_T *state);
10052 +vchiq_set_conn_state(VCHIQ_STATE_T *state, VCHIQ_CONNSTATE_T newstate);
10056 +vchiq_log_dump_mem(const char *label, uint32_t addr, const void *voidMem,
10057 + size_t numBytes);
10061 +++ b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_genversion
10063 +#!/usr/bin/perl -w
10068 +# Generate a version from available information
10071 +my $prefix = shift @ARGV;
10072 +my $root = shift @ARGV;
10075 +if ( not defined $root ) {
10076 + die "usage: $0 prefix root-dir\n";
10079 +if ( ! -d $root ) {
10080 + die "root directory $root not found\n";
10083 +my $version = "unknown";
10086 +if ( -d "$root/.git" ) {
10087 + # attempt to work out git version. only do so
10088 + # on a linux build host, as cygwin builds are
10089 + # already slow enough
10091 + if ( -f "/usr/bin/git" || -f "/usr/local/bin/git" ) {
10092 + if (not open(F, "git --git-dir $root/.git rev-parse --verify HEAD|")) {
10093 + $version = "no git version";
10097 + $version =~ s/[ \r\n]*$//; # chomp may not be enough (cygwin).
10098 + $version =~ s/^[ \r\n]*//; # chomp may not be enough (cygwin).
10101 + if (open(G, "git --git-dir $root/.git status --porcelain|")) {
10103 + $tainted =~ s/[ \r\n]*$//; # chomp may not be enough (cygwin).
10104 + $tainted =~ s/^[ \r\n]*//; # chomp may not be enough (cygwin).
10105 + if (length $tainted) {
10106 + $version = join ' ', $version, "(tainted)";
10109 + $version = join ' ', $version, "(clean)";
10115 +my $hostname = `hostname`;
10116 +$hostname =~ s/[ \r\n]*$//; # chomp may not be enough (cygwin).
10117 +$hostname =~ s/^[ \r\n]*//; # chomp may not be enough (cygwin).
10120 +print STDERR "Version $version\n";
10122 +#include "${prefix}_build_info.h"
10123 +#include <linux/broadcom/vc_debug_sym.h>
10125 +VC_DEBUG_DECLARE_STRING_VAR( ${prefix}_build_hostname, "$hostname" );
10126 +VC_DEBUG_DECLARE_STRING_VAR( ${prefix}_build_version, "$version" );
10127 +VC_DEBUG_DECLARE_STRING_VAR( ${prefix}_build_time, __TIME__ );
10128 +VC_DEBUG_DECLARE_STRING_VAR( ${prefix}_build_date, __DATE__ );
10130 +const char *vchiq_get_build_hostname( void )
10132 + return vchiq_build_hostname;
10135 +const char *vchiq_get_build_version( void )
10137 + return vchiq_build_version;
10140 +const char *vchiq_get_build_date( void )
10142 + return vchiq_build_date;
10145 +const char *vchiq_get_build_time( void )
10147 + return vchiq_build_time;
10151 +++ b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_if.h
10154 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
10156 + * Redistribution and use in source and binary forms, with or without
10157 + * modification, are permitted provided that the following conditions
10159 + * 1. Redistributions of source code must retain the above copyright
10160 + * notice, this list of conditions, and the following disclaimer,
10161 + * without modification.
10162 + * 2. Redistributions in binary form must reproduce the above copyright
10163 + * notice, this list of conditions and the following disclaimer in the
10164 + * documentation and/or other materials provided with the distribution.
10165 + * 3. The names of the above-listed copyright holders may not be used
10166 + * to endorse or promote products derived from this software without
10167 + * specific prior written permission.
10169 + * ALTERNATIVELY, this software may be distributed under the terms of the
10170 + * GNU General Public License ("GPL") version 2, as published by the Free
10171 + * Software Foundation.
10173 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
10174 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
10175 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
10176 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
10177 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
10178 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
10179 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
10180 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
10181 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
10182 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
10183 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
10186 +#ifndef VCHIQ_IF_H
10187 +#define VCHIQ_IF_H
10189 +#include "interface/vchi/vchi_mh.h"
10191 +#define VCHIQ_SERVICE_HANDLE_INVALID 0
10193 +#define VCHIQ_SLOT_SIZE 4096
10194 +#define VCHIQ_MAX_MSG_SIZE (VCHIQ_SLOT_SIZE - sizeof(VCHIQ_HEADER_T))
10195 +#define VCHIQ_CHANNEL_SIZE VCHIQ_MAX_MSG_SIZE /* For backwards compatibility */
10197 +#define VCHIQ_MAKE_FOURCC(x0, x1, x2, x3) \
10198 + (((x0) << 24) | ((x1) << 16) | ((x2) << 8) | (x3))
10199 +#define VCHIQ_GET_SERVICE_USERDATA(service) vchiq_get_service_userdata(service)
10200 +#define VCHIQ_GET_SERVICE_FOURCC(service) vchiq_get_service_fourcc(service)
10203 + VCHIQ_SERVICE_OPENED, /* service, -, - */
10204 + VCHIQ_SERVICE_CLOSED, /* service, -, - */
10205 + VCHIQ_MESSAGE_AVAILABLE, /* service, header, - */
10206 + VCHIQ_BULK_TRANSMIT_DONE, /* service, -, bulk_userdata */
10207 + VCHIQ_BULK_RECEIVE_DONE, /* service, -, bulk_userdata */
10208 + VCHIQ_BULK_TRANSMIT_ABORTED, /* service, -, bulk_userdata */
10209 + VCHIQ_BULK_RECEIVE_ABORTED /* service, -, bulk_userdata */
10213 + VCHIQ_ERROR = -1,
10214 + VCHIQ_SUCCESS = 0,
10219 + VCHIQ_BULK_MODE_CALLBACK,
10220 + VCHIQ_BULK_MODE_BLOCKING,
10221 + VCHIQ_BULK_MODE_NOCALLBACK,
10222 + VCHIQ_BULK_MODE_WAITING /* Reserved for internal use */
10223 +} VCHIQ_BULK_MODE_T;
10226 + VCHIQ_SERVICE_OPTION_AUTOCLOSE,
10227 + VCHIQ_SERVICE_OPTION_SLOT_QUOTA,
10228 + VCHIQ_SERVICE_OPTION_MESSAGE_QUOTA,
10229 + VCHIQ_SERVICE_OPTION_SYNCHRONOUS
10230 +} VCHIQ_SERVICE_OPTION_T;
10232 +typedef struct vchiq_header_struct {
10233 + /* The message identifier - opaque to applications. */
10236 + /* Size of message data. */
10237 + unsigned int size;
10239 + char data[0]; /* message */
10243 + const void *data;
10244 + unsigned int size;
10245 +} VCHIQ_ELEMENT_T;
10247 +typedef unsigned int VCHIQ_SERVICE_HANDLE_T;
10249 +typedef VCHIQ_STATUS_T (*VCHIQ_CALLBACK_T)(VCHIQ_REASON_T, VCHIQ_HEADER_T *,
10250 + VCHIQ_SERVICE_HANDLE_T, void *);
10252 +typedef struct vchiq_service_base_struct {
10254 + VCHIQ_CALLBACK_T callback;
10256 +} VCHIQ_SERVICE_BASE_T;
10258 +typedef struct vchiq_service_params_struct {
10260 + VCHIQ_CALLBACK_T callback;
10262 + short version; /* Increment for non-trivial changes */
10263 + short version_min; /* Update for incompatible changes */
10264 +} VCHIQ_SERVICE_PARAMS_T;
10266 +typedef struct vchiq_config_struct {
10267 + unsigned int max_msg_size;
10268 + unsigned int bulk_threshold; /* The message size above which it
10269 + is better to use a bulk transfer
10270 + (<= max_msg_size) */
10271 + unsigned int max_outstanding_bulks;
10272 + unsigned int max_services;
10273 + short version; /* The version of VCHIQ */
10274 + short version_min; /* The minimum compatible version of VCHIQ */
10277 +typedef struct vchiq_instance_struct *VCHIQ_INSTANCE_T;
10278 +typedef void (*VCHIQ_REMOTE_USE_CALLBACK_T)(void *cb_arg);
10280 +extern VCHIQ_STATUS_T vchiq_initialise(VCHIQ_INSTANCE_T *pinstance);
10281 +extern VCHIQ_STATUS_T vchiq_shutdown(VCHIQ_INSTANCE_T instance);
10282 +extern VCHIQ_STATUS_T vchiq_connect(VCHIQ_INSTANCE_T instance);
10283 +extern VCHIQ_STATUS_T vchiq_add_service(VCHIQ_INSTANCE_T instance,
10284 + const VCHIQ_SERVICE_PARAMS_T *params,
10285 + VCHIQ_SERVICE_HANDLE_T *pservice);
10286 +extern VCHIQ_STATUS_T vchiq_open_service(VCHIQ_INSTANCE_T instance,
10287 + const VCHIQ_SERVICE_PARAMS_T *params,
10288 + VCHIQ_SERVICE_HANDLE_T *pservice);
10289 +extern VCHIQ_STATUS_T vchiq_close_service(VCHIQ_SERVICE_HANDLE_T service);
10290 +extern VCHIQ_STATUS_T vchiq_remove_service(VCHIQ_SERVICE_HANDLE_T service);
10291 +extern VCHIQ_STATUS_T vchiq_use_service(VCHIQ_SERVICE_HANDLE_T service);
10292 +extern VCHIQ_STATUS_T vchiq_use_service_no_resume(
10293 + VCHIQ_SERVICE_HANDLE_T service);
10294 +extern VCHIQ_STATUS_T vchiq_release_service(VCHIQ_SERVICE_HANDLE_T service);
10296 +extern VCHIQ_STATUS_T vchiq_queue_message(VCHIQ_SERVICE_HANDLE_T service,
10297 + const VCHIQ_ELEMENT_T *elements, unsigned int count);
10298 +extern void vchiq_release_message(VCHIQ_SERVICE_HANDLE_T service,
10299 + VCHIQ_HEADER_T *header);
10300 +extern VCHIQ_STATUS_T vchiq_queue_bulk_transmit(VCHIQ_SERVICE_HANDLE_T service,
10301 + const void *data, unsigned int size, void *userdata);
10302 +extern VCHIQ_STATUS_T vchiq_queue_bulk_receive(VCHIQ_SERVICE_HANDLE_T service,
10303 + void *data, unsigned int size, void *userdata);
10304 +extern VCHIQ_STATUS_T vchiq_queue_bulk_transmit_handle(
10305 + VCHIQ_SERVICE_HANDLE_T service, VCHI_MEM_HANDLE_T handle,
10306 + const void *offset, unsigned int size, void *userdata);
10307 +extern VCHIQ_STATUS_T vchiq_queue_bulk_receive_handle(
10308 + VCHIQ_SERVICE_HANDLE_T service, VCHI_MEM_HANDLE_T handle,
10309 + void *offset, unsigned int size, void *userdata);
10310 +extern VCHIQ_STATUS_T vchiq_bulk_transmit(VCHIQ_SERVICE_HANDLE_T service,
10311 + const void *data, unsigned int size, void *userdata,
10312 + VCHIQ_BULK_MODE_T mode);
10313 +extern VCHIQ_STATUS_T vchiq_bulk_receive(VCHIQ_SERVICE_HANDLE_T service,
10314 + void *data, unsigned int size, void *userdata,
10315 + VCHIQ_BULK_MODE_T mode);
10316 +extern VCHIQ_STATUS_T vchiq_bulk_transmit_handle(VCHIQ_SERVICE_HANDLE_T service,
10317 + VCHI_MEM_HANDLE_T handle, const void *offset, unsigned int size,
10318 + void *userdata, VCHIQ_BULK_MODE_T mode);
10319 +extern VCHIQ_STATUS_T vchiq_bulk_receive_handle(VCHIQ_SERVICE_HANDLE_T service,
10320 + VCHI_MEM_HANDLE_T handle, void *offset, unsigned int size,
10321 + void *userdata, VCHIQ_BULK_MODE_T mode);
10322 +extern int vchiq_get_client_id(VCHIQ_SERVICE_HANDLE_T service);
10323 +extern void *vchiq_get_service_userdata(VCHIQ_SERVICE_HANDLE_T service);
10324 +extern int vchiq_get_service_fourcc(VCHIQ_SERVICE_HANDLE_T service);
10325 +extern VCHIQ_STATUS_T vchiq_get_config(VCHIQ_INSTANCE_T instance,
10326 + int config_size, VCHIQ_CONFIG_T *pconfig);
10327 +extern VCHIQ_STATUS_T vchiq_set_service_option(VCHIQ_SERVICE_HANDLE_T service,
10328 + VCHIQ_SERVICE_OPTION_T option, int value);
10330 +extern VCHIQ_STATUS_T vchiq_remote_use(VCHIQ_INSTANCE_T instance,
10331 + VCHIQ_REMOTE_USE_CALLBACK_T callback, void *cb_arg);
10332 +extern VCHIQ_STATUS_T vchiq_remote_release(VCHIQ_INSTANCE_T instance);
10334 +extern VCHIQ_STATUS_T vchiq_dump_phys_mem(VCHIQ_SERVICE_HANDLE_T service,
10335 + void *ptr, size_t num_bytes);
10337 +extern VCHIQ_STATUS_T vchiq_get_peer_version(VCHIQ_SERVICE_HANDLE_T handle,
10338 + short *peer_version);
10340 +#endif /* VCHIQ_IF_H */
10342 +++ b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_ioctl.h
10345 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
10347 + * Redistribution and use in source and binary forms, with or without
10348 + * modification, are permitted provided that the following conditions
10350 + * 1. Redistributions of source code must retain the above copyright
10351 + * notice, this list of conditions, and the following disclaimer,
10352 + * without modification.
10353 + * 2. Redistributions in binary form must reproduce the above copyright
10354 + * notice, this list of conditions and the following disclaimer in the
10355 + * documentation and/or other materials provided with the distribution.
10356 + * 3. The names of the above-listed copyright holders may not be used
10357 + * to endorse or promote products derived from this software without
10358 + * specific prior written permission.
10360 + * ALTERNATIVELY, this software may be distributed under the terms of the
10361 + * GNU General Public License ("GPL") version 2, as published by the Free
10362 + * Software Foundation.
10364 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
10365 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
10366 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
10367 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
10368 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
10369 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
10370 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
10371 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
10372 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
10373 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
10374 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
10377 +#ifndef VCHIQ_IOCTLS_H
10378 +#define VCHIQ_IOCTLS_H
10380 +#include <linux/ioctl.h>
10381 +#include "vchiq_if.h"
10383 +#define VCHIQ_IOC_MAGIC 0xc4
10384 +#define VCHIQ_INVALID_HANDLE (~0)
10387 + VCHIQ_SERVICE_PARAMS_T params;
10390 + unsigned int handle; /* OUT */
10391 +} VCHIQ_CREATE_SERVICE_T;
10394 + unsigned int handle;
10395 + unsigned int count;
10396 + const VCHIQ_ELEMENT_T *elements;
10397 +} VCHIQ_QUEUE_MESSAGE_T;
10400 + unsigned int handle;
10402 + unsigned int size;
10404 + VCHIQ_BULK_MODE_T mode;
10405 +} VCHIQ_QUEUE_BULK_TRANSFER_T;
10408 + VCHIQ_REASON_T reason;
10409 + VCHIQ_HEADER_T *header;
10410 + void *service_userdata;
10411 + void *bulk_userdata;
10412 +} VCHIQ_COMPLETION_DATA_T;
10415 + unsigned int count;
10416 + VCHIQ_COMPLETION_DATA_T *buf;
10417 + unsigned int msgbufsize;
10418 + unsigned int msgbufcount; /* IN/OUT */
10420 +} VCHIQ_AWAIT_COMPLETION_T;
10423 + unsigned int handle;
10425 + unsigned int bufsize;
10427 +} VCHIQ_DEQUEUE_MESSAGE_T;
10430 + unsigned int config_size;
10431 + VCHIQ_CONFIG_T *pconfig;
10432 +} VCHIQ_GET_CONFIG_T;
10435 + unsigned int handle;
10436 + VCHIQ_SERVICE_OPTION_T option;
10438 +} VCHIQ_SET_SERVICE_OPTION_T;
10442 + size_t num_bytes;
10443 +} VCHIQ_DUMP_MEM_T;
10445 +#define VCHIQ_IOC_CONNECT _IO(VCHIQ_IOC_MAGIC, 0)
10446 +#define VCHIQ_IOC_SHUTDOWN _IO(VCHIQ_IOC_MAGIC, 1)
10447 +#define VCHIQ_IOC_CREATE_SERVICE \
10448 + _IOWR(VCHIQ_IOC_MAGIC, 2, VCHIQ_CREATE_SERVICE_T)
10449 +#define VCHIQ_IOC_REMOVE_SERVICE _IO(VCHIQ_IOC_MAGIC, 3)
10450 +#define VCHIQ_IOC_QUEUE_MESSAGE \
10451 + _IOW(VCHIQ_IOC_MAGIC, 4, VCHIQ_QUEUE_MESSAGE_T)
10452 +#define VCHIQ_IOC_QUEUE_BULK_TRANSMIT \
10453 + _IOWR(VCHIQ_IOC_MAGIC, 5, VCHIQ_QUEUE_BULK_TRANSFER_T)
10454 +#define VCHIQ_IOC_QUEUE_BULK_RECEIVE \
10455 + _IOWR(VCHIQ_IOC_MAGIC, 6, VCHIQ_QUEUE_BULK_TRANSFER_T)
10456 +#define VCHIQ_IOC_AWAIT_COMPLETION \
10457 + _IOWR(VCHIQ_IOC_MAGIC, 7, VCHIQ_AWAIT_COMPLETION_T)
10458 +#define VCHIQ_IOC_DEQUEUE_MESSAGE \
10459 + _IOWR(VCHIQ_IOC_MAGIC, 8, VCHIQ_DEQUEUE_MESSAGE_T)
10460 +#define VCHIQ_IOC_GET_CLIENT_ID _IO(VCHIQ_IOC_MAGIC, 9)
10461 +#define VCHIQ_IOC_GET_CONFIG \
10462 + _IOWR(VCHIQ_IOC_MAGIC, 10, VCHIQ_GET_CONFIG_T)
10463 +#define VCHIQ_IOC_CLOSE_SERVICE _IO(VCHIQ_IOC_MAGIC, 11)
10464 +#define VCHIQ_IOC_USE_SERVICE _IO(VCHIQ_IOC_MAGIC, 12)
10465 +#define VCHIQ_IOC_RELEASE_SERVICE _IO(VCHIQ_IOC_MAGIC, 13)
10466 +#define VCHIQ_IOC_SET_SERVICE_OPTION \
10467 + _IOW(VCHIQ_IOC_MAGIC, 14, VCHIQ_SET_SERVICE_OPTION_T)
10468 +#define VCHIQ_IOC_DUMP_PHYS_MEM \
10469 + _IOW(VCHIQ_IOC_MAGIC, 15, VCHIQ_DUMP_MEM_T)
10470 +#define VCHIQ_IOC_MAX 15
10474 +++ b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_kern_lib.c
10477 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
10479 + * Redistribution and use in source and binary forms, with or without
10480 + * modification, are permitted provided that the following conditions
10482 + * 1. Redistributions of source code must retain the above copyright
10483 + * notice, this list of conditions, and the following disclaimer,
10484 + * without modification.
10485 + * 2. Redistributions in binary form must reproduce the above copyright
10486 + * notice, this list of conditions and the following disclaimer in the
10487 + * documentation and/or other materials provided with the distribution.
10488 + * 3. The names of the above-listed copyright holders may not be used
10489 + * to endorse or promote products derived from this software without
10490 + * specific prior written permission.
10492 + * ALTERNATIVELY, this software may be distributed under the terms of the
10493 + * GNU General Public License ("GPL") version 2, as published by the Free
10494 + * Software Foundation.
10496 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
10497 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
10498 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
10499 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
10500 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
10501 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
10502 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
10503 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
10504 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
10505 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
10506 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
10509 +/* ---- Include Files ---------------------------------------------------- */
10511 +#include <linux/kernel.h>
10512 +#include <linux/module.h>
10513 +#include <linux/mutex.h>
10515 +#include "vchiq_core.h"
10516 +#include "vchiq_arm.h"
10518 +/* ---- Public Variables ------------------------------------------------- */
10520 +/* ---- Private Constants and Types -------------------------------------- */
10522 +struct bulk_waiter_node {
10523 + struct bulk_waiter bulk_waiter;
10525 + struct list_head list;
10528 +struct vchiq_instance_struct {
10529 + VCHIQ_STATE_T *state;
10533 + struct list_head bulk_waiter_list;
10534 + struct mutex bulk_waiter_list_mutex;
10537 +static VCHIQ_STATUS_T
10538 +vchiq_blocking_bulk_transfer(VCHIQ_SERVICE_HANDLE_T handle, void *data,
10539 + unsigned int size, VCHIQ_BULK_DIR_T dir);
10541 +/****************************************************************************
10543 +* vchiq_initialise
10545 +***************************************************************************/
10546 +#define VCHIQ_INIT_RETRIES 10
10547 +VCHIQ_STATUS_T vchiq_initialise(VCHIQ_INSTANCE_T *instanceOut)
10549 + VCHIQ_STATUS_T status = VCHIQ_ERROR;
10550 + VCHIQ_STATE_T *state;
10551 + VCHIQ_INSTANCE_T instance = NULL;
10554 + vchiq_log_trace(vchiq_core_log_level, "%s called", __func__);
10556 + /* VideoCore may not be ready due to boot up timing.
10557 + It may never be ready if kernel and firmware are mismatched, so don't block forever. */
10558 + for (i=0; i<VCHIQ_INIT_RETRIES; i++) {
10559 + state = vchiq_get_state();
10564 + if (i==VCHIQ_INIT_RETRIES) {
10565 + vchiq_log_error(vchiq_core_log_level,
10566 + "%s: videocore not initialized\n", __func__);
10568 + } else if (i>0) {
10569 + vchiq_log_warning(vchiq_core_log_level,
10570 + "%s: videocore initialized after %d retries\n", __func__, i);
10573 + instance = kzalloc(sizeof(*instance), GFP_KERNEL);
10575 + vchiq_log_error(vchiq_core_log_level,
10576 + "%s: error allocating vchiq instance\n", __func__);
10580 + instance->connected = 0;
10581 + instance->state = state;
10582 + mutex_init(&instance->bulk_waiter_list_mutex);
10583 + INIT_LIST_HEAD(&instance->bulk_waiter_list);
10585 + *instanceOut = instance;
10587 + status = VCHIQ_SUCCESS;
10590 + vchiq_log_trace(vchiq_core_log_level,
10591 + "%s(%p): returning %d", __func__, instance, status);
10595 +EXPORT_SYMBOL(vchiq_initialise);
10597 +/****************************************************************************
10601 +***************************************************************************/
10603 +VCHIQ_STATUS_T vchiq_shutdown(VCHIQ_INSTANCE_T instance)
10605 + VCHIQ_STATUS_T status;
10606 + VCHIQ_STATE_T *state = instance->state;
10608 + vchiq_log_trace(vchiq_core_log_level,
10609 + "%s(%p) called", __func__, instance);
10611 + if (mutex_lock_interruptible(&state->mutex) != 0)
10612 + return VCHIQ_RETRY;
10614 + /* Remove all services */
10615 + status = vchiq_shutdown_internal(state, instance);
10617 + mutex_unlock(&state->mutex);
10619 + vchiq_log_trace(vchiq_core_log_level,
10620 + "%s(%p): returning %d", __func__, instance, status);
10622 + if (status == VCHIQ_SUCCESS) {
10623 + struct list_head *pos, *next;
10624 + list_for_each_safe(pos, next,
10625 + &instance->bulk_waiter_list) {
10626 + struct bulk_waiter_node *waiter;
10627 + waiter = list_entry(pos,
10628 + struct bulk_waiter_node,
10631 + vchiq_log_info(vchiq_arm_log_level,
10632 + "bulk_waiter - cleaned up %x "
10634 + (unsigned int)waiter, waiter->pid);
10642 +EXPORT_SYMBOL(vchiq_shutdown);
10644 +/****************************************************************************
10646 +* vchiq_is_connected
10648 +***************************************************************************/
10650 +int vchiq_is_connected(VCHIQ_INSTANCE_T instance)
10652 + return instance->connected;
10655 +/****************************************************************************
10659 +***************************************************************************/
10661 +VCHIQ_STATUS_T vchiq_connect(VCHIQ_INSTANCE_T instance)
10663 + VCHIQ_STATUS_T status;
10664 + VCHIQ_STATE_T *state = instance->state;
10666 + vchiq_log_trace(vchiq_core_log_level,
10667 + "%s(%p) called", __func__, instance);
10669 + if (mutex_lock_interruptible(&state->mutex) != 0) {
10670 + vchiq_log_trace(vchiq_core_log_level,
10671 + "%s: call to mutex_lock failed", __func__);
10672 + status = VCHIQ_RETRY;
10675 + status = vchiq_connect_internal(state, instance);
10677 + if (status == VCHIQ_SUCCESS)
10678 + instance->connected = 1;
10680 + mutex_unlock(&state->mutex);
10683 + vchiq_log_trace(vchiq_core_log_level,
10684 + "%s(%p): returning %d", __func__, instance, status);
10688 +EXPORT_SYMBOL(vchiq_connect);
10690 +/****************************************************************************
10692 +* vchiq_add_service
10694 +***************************************************************************/
10696 +VCHIQ_STATUS_T vchiq_add_service(
10697 + VCHIQ_INSTANCE_T instance,
10698 + const VCHIQ_SERVICE_PARAMS_T *params,
10699 + VCHIQ_SERVICE_HANDLE_T *phandle)
10701 + VCHIQ_STATUS_T status;
10702 + VCHIQ_STATE_T *state = instance->state;
10703 + VCHIQ_SERVICE_T *service = NULL;
10706 + vchiq_log_trace(vchiq_core_log_level,
10707 + "%s(%p) called", __func__, instance);
10709 + *phandle = VCHIQ_SERVICE_HANDLE_INVALID;
10711 + srvstate = vchiq_is_connected(instance)
10712 + ? VCHIQ_SRVSTATE_LISTENING
10713 + : VCHIQ_SRVSTATE_HIDDEN;
10715 + service = vchiq_add_service_internal(
10723 + *phandle = service->handle;
10724 + status = VCHIQ_SUCCESS;
10726 + status = VCHIQ_ERROR;
10728 + vchiq_log_trace(vchiq_core_log_level,
10729 + "%s(%p): returning %d", __func__, instance, status);
10733 +EXPORT_SYMBOL(vchiq_add_service);
10735 +/****************************************************************************
10737 +* vchiq_open_service
10739 +***************************************************************************/
10741 +VCHIQ_STATUS_T vchiq_open_service(
10742 + VCHIQ_INSTANCE_T instance,
10743 + const VCHIQ_SERVICE_PARAMS_T *params,
10744 + VCHIQ_SERVICE_HANDLE_T *phandle)
10746 + VCHIQ_STATUS_T status = VCHIQ_ERROR;
10747 + VCHIQ_STATE_T *state = instance->state;
10748 + VCHIQ_SERVICE_T *service = NULL;
10750 + vchiq_log_trace(vchiq_core_log_level,
10751 + "%s(%p) called", __func__, instance);
10753 + *phandle = VCHIQ_SERVICE_HANDLE_INVALID;
10755 + if (!vchiq_is_connected(instance))
10758 + service = vchiq_add_service_internal(state,
10760 + VCHIQ_SRVSTATE_OPENING,
10765 + status = vchiq_open_service_internal(service, current->pid);
10766 + if (status == VCHIQ_SUCCESS)
10767 + *phandle = service->handle;
10769 + vchiq_remove_service(service->handle);
10773 + vchiq_log_trace(vchiq_core_log_level,
10774 + "%s(%p): returning %d", __func__, instance, status);
10778 +EXPORT_SYMBOL(vchiq_open_service);
10781 +vchiq_queue_bulk_transmit(VCHIQ_SERVICE_HANDLE_T handle,
10782 + const void *data, unsigned int size, void *userdata)
10784 + return vchiq_bulk_transfer(handle,
10785 + VCHI_MEM_HANDLE_INVALID, (void *)data, size, userdata,
10786 + VCHIQ_BULK_MODE_CALLBACK, VCHIQ_BULK_TRANSMIT);
10788 +EXPORT_SYMBOL(vchiq_queue_bulk_transmit);
10791 +vchiq_queue_bulk_receive(VCHIQ_SERVICE_HANDLE_T handle, void *data,
10792 + unsigned int size, void *userdata)
10794 + return vchiq_bulk_transfer(handle,
10795 + VCHI_MEM_HANDLE_INVALID, data, size, userdata,
10796 + VCHIQ_BULK_MODE_CALLBACK, VCHIQ_BULK_RECEIVE);
10798 +EXPORT_SYMBOL(vchiq_queue_bulk_receive);
10801 +vchiq_bulk_transmit(VCHIQ_SERVICE_HANDLE_T handle, const void *data,
10802 + unsigned int size, void *userdata, VCHIQ_BULK_MODE_T mode)
10804 + VCHIQ_STATUS_T status;
10807 + case VCHIQ_BULK_MODE_NOCALLBACK:
10808 + case VCHIQ_BULK_MODE_CALLBACK:
10809 + status = vchiq_bulk_transfer(handle,
10810 + VCHI_MEM_HANDLE_INVALID, (void *)data, size, userdata,
10811 + mode, VCHIQ_BULK_TRANSMIT);
10813 + case VCHIQ_BULK_MODE_BLOCKING:
10814 + status = vchiq_blocking_bulk_transfer(handle,
10815 + (void *)data, size, VCHIQ_BULK_TRANSMIT);
10818 + return VCHIQ_ERROR;
10823 +EXPORT_SYMBOL(vchiq_bulk_transmit);
10826 +vchiq_bulk_receive(VCHIQ_SERVICE_HANDLE_T handle, void *data,
10827 + unsigned int size, void *userdata, VCHIQ_BULK_MODE_T mode)
10829 + VCHIQ_STATUS_T status;
10832 + case VCHIQ_BULK_MODE_NOCALLBACK:
10833 + case VCHIQ_BULK_MODE_CALLBACK:
10834 + status = vchiq_bulk_transfer(handle,
10835 + VCHI_MEM_HANDLE_INVALID, data, size, userdata,
10836 + mode, VCHIQ_BULK_RECEIVE);
10838 + case VCHIQ_BULK_MODE_BLOCKING:
10839 + status = vchiq_blocking_bulk_transfer(handle,
10840 + (void *)data, size, VCHIQ_BULK_RECEIVE);
10843 + return VCHIQ_ERROR;
10848 +EXPORT_SYMBOL(vchiq_bulk_receive);
10850 +static VCHIQ_STATUS_T
10851 +vchiq_blocking_bulk_transfer(VCHIQ_SERVICE_HANDLE_T handle, void *data,
10852 + unsigned int size, VCHIQ_BULK_DIR_T dir)
10854 + VCHIQ_INSTANCE_T instance;
10855 + VCHIQ_SERVICE_T *service;
10856 + VCHIQ_STATUS_T status;
10857 + struct bulk_waiter_node *waiter = NULL;
10858 + struct list_head *pos;
10860 + service = find_service_by_handle(handle);
10862 + return VCHIQ_ERROR;
10864 + instance = service->instance;
10866 + unlock_service(service);
10868 + mutex_lock(&instance->bulk_waiter_list_mutex);
10869 + list_for_each(pos, &instance->bulk_waiter_list) {
10870 + if (list_entry(pos, struct bulk_waiter_node,
10871 + list)->pid == current->pid) {
10872 + waiter = list_entry(pos,
10873 + struct bulk_waiter_node,
10879 + mutex_unlock(&instance->bulk_waiter_list_mutex);
10882 + VCHIQ_BULK_T *bulk = waiter->bulk_waiter.bulk;
10884 + /* This thread has an outstanding bulk transfer. */
10885 + if ((bulk->data != data) ||
10886 + (bulk->size != size)) {
10887 + /* This is not a retry of the previous one.
10888 + ** Cancel the signal when the transfer
10890 + spin_lock(&bulk_waiter_spinlock);
10891 + bulk->userdata = NULL;
10892 + spin_unlock(&bulk_waiter_spinlock);
10898 + waiter = kzalloc(sizeof(struct bulk_waiter_node), GFP_KERNEL);
10900 + vchiq_log_error(vchiq_core_log_level,
10901 + "%s - out of memory", __func__);
10902 + return VCHIQ_ERROR;
10906 + status = vchiq_bulk_transfer(handle, VCHI_MEM_HANDLE_INVALID,
10907 + data, size, &waiter->bulk_waiter, VCHIQ_BULK_MODE_BLOCKING,
10909 + if ((status != VCHIQ_RETRY) || fatal_signal_pending(current) ||
10910 + !waiter->bulk_waiter.bulk) {
10911 + VCHIQ_BULK_T *bulk = waiter->bulk_waiter.bulk;
10913 + /* Cancel the signal when the transfer
10915 + spin_lock(&bulk_waiter_spinlock);
10916 + bulk->userdata = NULL;
10917 + spin_unlock(&bulk_waiter_spinlock);
10921 + waiter->pid = current->pid;
10922 + mutex_lock(&instance->bulk_waiter_list_mutex);
10923 + list_add(&waiter->list, &instance->bulk_waiter_list);
10924 + mutex_unlock(&instance->bulk_waiter_list_mutex);
10925 + vchiq_log_info(vchiq_arm_log_level,
10926 + "saved bulk_waiter %x for pid %d",
10927 + (unsigned int)waiter, current->pid);
10933 +++ b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_memdrv.h
10936 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
10938 + * Redistribution and use in source and binary forms, with or without
10939 + * modification, are permitted provided that the following conditions
10941 + * 1. Redistributions of source code must retain the above copyright
10942 + * notice, this list of conditions, and the following disclaimer,
10943 + * without modification.
10944 + * 2. Redistributions in binary form must reproduce the above copyright
10945 + * notice, this list of conditions and the following disclaimer in the
10946 + * documentation and/or other materials provided with the distribution.
10947 + * 3. The names of the above-listed copyright holders may not be used
10948 + * to endorse or promote products derived from this software without
10949 + * specific prior written permission.
10951 + * ALTERNATIVELY, this software may be distributed under the terms of the
10952 + * GNU General Public License ("GPL") version 2, as published by the Free
10953 + * Software Foundation.
10955 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
10956 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
10957 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
10958 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
10959 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
10960 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
10961 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
10962 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
10963 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
10964 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
10965 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
10968 +#ifndef VCHIQ_MEMDRV_H
10969 +#define VCHIQ_MEMDRV_H
10971 +/* ---- Include Files ----------------------------------------------------- */
10973 +#include <linux/kernel.h>
10974 +#include "vchiq_if.h"
10976 +/* ---- Constants and Types ---------------------------------------------- */
10979 + void *armSharedMemVirt;
10980 + dma_addr_t armSharedMemPhys;
10981 + size_t armSharedMemSize;
10983 + void *vcSharedMemVirt;
10984 + dma_addr_t vcSharedMemPhys;
10985 + size_t vcSharedMemSize;
10986 +} VCHIQ_SHARED_MEM_INFO_T;
10988 +/* ---- Variable Externs ------------------------------------------------- */
10990 +/* ---- Function Prototypes ---------------------------------------------- */
10992 +void vchiq_get_shared_mem_info(VCHIQ_SHARED_MEM_INFO_T *info);
10994 +VCHIQ_STATUS_T vchiq_memdrv_initialise(void);
10996 +VCHIQ_STATUS_T vchiq_userdrv_create_instance(
10997 + const VCHIQ_PLATFORM_DATA_T * platform_data);
10999 +VCHIQ_STATUS_T vchiq_userdrv_suspend(
11000 + const VCHIQ_PLATFORM_DATA_T * platform_data);
11002 +VCHIQ_STATUS_T vchiq_userdrv_resume(
11003 + const VCHIQ_PLATFORM_DATA_T * platform_data);
11007 +++ b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_pagelist.h
11010 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
11012 + * Redistribution and use in source and binary forms, with or without
11013 + * modification, are permitted provided that the following conditions
11015 + * 1. Redistributions of source code must retain the above copyright
11016 + * notice, this list of conditions, and the following disclaimer,
11017 + * without modification.
11018 + * 2. Redistributions in binary form must reproduce the above copyright
11019 + * notice, this list of conditions and the following disclaimer in the
11020 + * documentation and/or other materials provided with the distribution.
11021 + * 3. The names of the above-listed copyright holders may not be used
11022 + * to endorse or promote products derived from this software without
11023 + * specific prior written permission.
11025 + * ALTERNATIVELY, this software may be distributed under the terms of the
11026 + * GNU General Public License ("GPL") version 2, as published by the Free
11027 + * Software Foundation.
11029 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
11030 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
11031 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
11032 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
11033 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
11034 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
11035 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
11036 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
11037 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
11038 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
11039 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
11042 +#ifndef VCHIQ_PAGELIST_H
11043 +#define VCHIQ_PAGELIST_H
11046 +#define PAGE_SIZE 4096
11048 +#define CACHE_LINE_SIZE 32
11049 +#define PAGELIST_WRITE 0
11050 +#define PAGELIST_READ 1
11051 +#define PAGELIST_READ_WITH_FRAGMENTS 2
11053 +typedef struct pagelist_struct {
11054 + unsigned long length;
11055 + unsigned short type;
11056 + unsigned short offset;
11057 + unsigned long addrs[1]; /* N.B. 12 LSBs hold the number of following
11058 + pages at consecutive addresses. */
11061 +typedef struct fragments_struct {
11062 + char headbuf[CACHE_LINE_SIZE];
11063 + char tailbuf[CACHE_LINE_SIZE];
11066 +#endif /* VCHIQ_PAGELIST_H */
11068 +++ b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_proc.c
11071 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
11073 + * Redistribution and use in source and binary forms, with or without
11074 + * modification, are permitted provided that the following conditions
11076 + * 1. Redistributions of source code must retain the above copyright
11077 + * notice, this list of conditions, and the following disclaimer,
11078 + * without modification.
11079 + * 2. Redistributions in binary form must reproduce the above copyright
11080 + * notice, this list of conditions and the following disclaimer in the
11081 + * documentation and/or other materials provided with the distribution.
11082 + * 3. The names of the above-listed copyright holders may not be used
11083 + * to endorse or promote products derived from this software without
11084 + * specific prior written permission.
11086 + * ALTERNATIVELY, this software may be distributed under the terms of the
11087 + * GNU General Public License ("GPL") version 2, as published by the Free
11088 + * Software Foundation.
11090 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
11091 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
11092 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
11093 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
11094 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
11095 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
11096 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
11097 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
11098 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
11099 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
11100 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
11104 +#include <linux/proc_fs.h>
11105 +#include "vchiq_core.h"
11106 +#include "vchiq_arm.h"
11110 +int vchiq_proc_init(void)
11115 +void vchiq_proc_deinit(void)
11121 +struct vchiq_proc_info {
11122 + /* Global 'vc' proc entry used by all instances */
11123 + struct proc_dir_entry *vc_cfg_dir;
11125 + /* one entry per client process */
11126 + struct proc_dir_entry *clients;
11128 + /* log categories */
11129 + struct proc_dir_entry *log_categories;
11132 +static struct vchiq_proc_info proc_info;
11134 +struct proc_dir_entry *vchiq_proc_top(void)
11136 + BUG_ON(proc_info.vc_cfg_dir == NULL);
11137 + return proc_info.vc_cfg_dir;
11140 +/****************************************************************************
11142 +* log category entries
11144 +***************************************************************************/
11145 +#define PROC_WRITE_BUF_SIZE 256
11147 +#define VCHIQ_LOG_ERROR_STR "error"
11148 +#define VCHIQ_LOG_WARNING_STR "warning"
11149 +#define VCHIQ_LOG_INFO_STR "info"
11150 +#define VCHIQ_LOG_TRACE_STR "trace"
11152 +static int log_cfg_read(char *buffer,
11160 + char *log_value = NULL;
11162 + switch (*((int *)data)) {
11163 + case VCHIQ_LOG_ERROR:
11164 + log_value = VCHIQ_LOG_ERROR_STR;
11166 + case VCHIQ_LOG_WARNING:
11167 + log_value = VCHIQ_LOG_WARNING_STR;
11169 + case VCHIQ_LOG_INFO:
11170 + log_value = VCHIQ_LOG_INFO_STR;
11172 + case VCHIQ_LOG_TRACE:
11173 + log_value = VCHIQ_LOG_TRACE_STR;
11179 + len += sprintf(buffer + len,
11181 + log_value ? log_value : "(null)");
11187 +static int log_cfg_write(struct file *file,
11188 + const char __user *buffer,
11189 + unsigned long count,
11192 + int *log_module = data;
11193 + char kbuf[PROC_WRITE_BUF_SIZE + 1];
11197 + memset(kbuf, 0, PROC_WRITE_BUF_SIZE + 1);
11198 + if (count >= PROC_WRITE_BUF_SIZE)
11199 + count = PROC_WRITE_BUF_SIZE;
11201 + if (copy_from_user(kbuf,
11205 + kbuf[count - 1] = 0;
11207 + if (strncmp("error", kbuf, strlen("error")) == 0)
11208 + *log_module = VCHIQ_LOG_ERROR;
11209 + else if (strncmp("warning", kbuf, strlen("warning")) == 0)
11210 + *log_module = VCHIQ_LOG_WARNING;
11211 + else if (strncmp("info", kbuf, strlen("info")) == 0)
11212 + *log_module = VCHIQ_LOG_INFO;
11213 + else if (strncmp("trace", kbuf, strlen("trace")) == 0)
11214 + *log_module = VCHIQ_LOG_TRACE;
11216 + *log_module = VCHIQ_LOG_DEFAULT;
11221 +/* Log category proc entries */
11222 +struct vchiq_proc_log_entry {
11223 + const char *name;
11225 + struct proc_dir_entry *dir;
11228 +static struct vchiq_proc_log_entry vchiq_proc_log_entries[] = {
11229 + { "core", &vchiq_core_log_level },
11230 + { "msg", &vchiq_core_msg_log_level },
11231 + { "sync", &vchiq_sync_log_level },
11232 + { "susp", &vchiq_susp_log_level },
11233 + { "arm", &vchiq_arm_log_level },
11235 +static int n_log_entries =
11236 + sizeof(vchiq_proc_log_entries)/sizeof(vchiq_proc_log_entries[0]);
11238 +/* create an entry under /proc/vc/log for each log category */
11239 +static int vchiq_proc_create_log_entries(struct proc_dir_entry *top)
11241 + struct proc_dir_entry *dir;
11244 + dir = proc_mkdir("log", proc_info.vc_cfg_dir);
11247 + proc_info.log_categories = dir;
11249 + for (i = 0; i < n_log_entries; i++) {
11250 + dir = create_proc_entry(vchiq_proc_log_entries[i].name,
11252 + proc_info.log_categories);
11258 + dir->read_proc = &log_cfg_read;
11259 + dir->write_proc = &log_cfg_write;
11260 + dir->data = (void *)vchiq_proc_log_entries[i].plevel;
11262 + vchiq_proc_log_entries[i].dir = dir;
11268 +int vchiq_proc_init(void)
11270 + BUG_ON(proc_info.vc_cfg_dir != NULL);
11272 + proc_info.vc_cfg_dir = proc_mkdir("vc", NULL);
11273 + if (proc_info.vc_cfg_dir == NULL)
11276 + proc_info.clients = proc_mkdir("clients",
11277 + proc_info.vc_cfg_dir);
11278 + if (!proc_info.clients)
11281 + if (vchiq_proc_create_log_entries(proc_info.vc_cfg_dir) != 0)
11287 + vchiq_proc_deinit();
11288 + vchiq_log_error(vchiq_arm_log_level,
11289 + "%s: failed to create proc directory",
11295 +/* remove all the proc entries */
11296 +void vchiq_proc_deinit(void)
11298 + /* log category entries */
11299 + if (proc_info.log_categories) {
11301 + for (i = 0; i < n_log_entries; i++)
11302 + if (vchiq_proc_log_entries[i].dir)
11303 + remove_proc_entry(
11304 + vchiq_proc_log_entries[i].name,
11305 + proc_info.log_categories);
11307 + remove_proc_entry(proc_info.log_categories->name,
11308 + proc_info.vc_cfg_dir);
11310 + if (proc_info.clients)
11311 + remove_proc_entry(proc_info.clients->name,
11312 + proc_info.vc_cfg_dir);
11313 + if (proc_info.vc_cfg_dir)
11314 + remove_proc_entry(proc_info.vc_cfg_dir->name, NULL);
11317 +struct proc_dir_entry *vchiq_clients_top(void)
11319 + return proc_info.clients;
11324 +++ b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_shim.c
11327 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
11329 + * Redistribution and use in source and binary forms, with or without
11330 + * modification, are permitted provided that the following conditions
11332 + * 1. Redistributions of source code must retain the above copyright
11333 + * notice, this list of conditions, and the following disclaimer,
11334 + * without modification.
11335 + * 2. Redistributions in binary form must reproduce the above copyright
11336 + * notice, this list of conditions and the following disclaimer in the
11337 + * documentation and/or other materials provided with the distribution.
11338 + * 3. The names of the above-listed copyright holders may not be used
11339 + * to endorse or promote products derived from this software without
11340 + * specific prior written permission.
11342 + * ALTERNATIVELY, this software may be distributed under the terms of the
11343 + * GNU General Public License ("GPL") version 2, as published by the Free
11344 + * Software Foundation.
11346 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
11347 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
11348 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
11349 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
11350 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
11351 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
11352 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
11353 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
11354 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
11355 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
11356 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
11358 +#include <linux/module.h>
11359 +#include <linux/types.h>
11361 +#include "interface/vchi/vchi.h"
11362 +#include "vchiq.h"
11363 +#include "vchiq_core.h"
11365 +#include "vchiq_util.h"
11367 +#include <stddef.h>
11369 +#define vchiq_status_to_vchi(status) ((int32_t)status)
11372 + VCHIQ_SERVICE_HANDLE_T handle;
11374 + VCHIU_QUEUE_T queue;
11376 + VCHI_CALLBACK_T callback;
11377 + void *callback_param;
11380 +/* ----------------------------------------------------------------------
11381 + * return pointer to the mphi message driver function table
11382 + * -------------------------------------------------------------------- */
11383 +const VCHI_MESSAGE_DRIVER_T *
11384 +vchi_mphi_message_driver_func_table(void)
11389 +/* ----------------------------------------------------------------------
11390 + * return a pointer to the 'single' connection driver fops
11391 + * -------------------------------------------------------------------- */
11392 +const VCHI_CONNECTION_API_T *
11393 +single_get_func_table(void)
11398 +VCHI_CONNECTION_T *vchi_create_connection(
11399 + const VCHI_CONNECTION_API_T *function_table,
11400 + const VCHI_MESSAGE_DRIVER_T *low_level)
11402 + (void)function_table;
11407 +/***********************************************************
11408 + * Name: vchi_msg_peek
11410 + * Arguments: const VCHI_SERVICE_HANDLE_T handle,
11412 + * uint32_t *msg_size,
11415 + * VCHI_FLAGS_T flags
11417 + * Description: Routine to return a pointer to the current message (to allow in
11418 + * place processing). The message can be removed using
11419 + * vchi_msg_remove when you're finished
11421 + * Returns: int32_t - success == 0
11423 + ***********************************************************/
11424 +int32_t vchi_msg_peek(VCHI_SERVICE_HANDLE_T handle,
11426 + uint32_t *msg_size,
11427 + VCHI_FLAGS_T flags)
11429 + SHIM_SERVICE_T *service = (SHIM_SERVICE_T *)handle;
11430 + VCHIQ_HEADER_T *header;
11432 + WARN_ON((flags != VCHI_FLAGS_NONE) &&
11433 + (flags != VCHI_FLAGS_BLOCK_UNTIL_OP_COMPLETE));
11435 + if (flags == VCHI_FLAGS_NONE)
11436 + if (vchiu_queue_is_empty(&service->queue))
11439 + header = vchiu_queue_peek(&service->queue);
11441 + *data = header->data;
11442 + *msg_size = header->size;
11446 +EXPORT_SYMBOL(vchi_msg_peek);
11448 +/***********************************************************
11449 + * Name: vchi_msg_remove
11451 + * Arguments: const VCHI_SERVICE_HANDLE_T handle,
11453 + * Description: Routine to remove a message (after it has been read with
11456 + * Returns: int32_t - success == 0
11458 + ***********************************************************/
11459 +int32_t vchi_msg_remove(VCHI_SERVICE_HANDLE_T handle)
11461 + SHIM_SERVICE_T *service = (SHIM_SERVICE_T *)handle;
11462 + VCHIQ_HEADER_T *header;
11464 + header = vchiu_queue_pop(&service->queue);
11466 + vchiq_release_message(service->handle, header);
11470 +EXPORT_SYMBOL(vchi_msg_remove);
11472 +/***********************************************************
11473 + * Name: vchi_msg_queue
11475 + * Arguments: VCHI_SERVICE_HANDLE_T handle,
11476 + * const void *data,
11477 + * uint32_t data_size,
11478 + * VCHI_FLAGS_T flags,
11479 + * void *msg_handle,
11481 + * Description: Thin wrapper to queue a message onto a connection
11483 + * Returns: int32_t - success == 0
11485 + ***********************************************************/
11486 +int32_t vchi_msg_queue(VCHI_SERVICE_HANDLE_T handle,
11487 + const void *data,
11488 + uint32_t data_size,
11489 + VCHI_FLAGS_T flags,
11490 + void *msg_handle)
11492 + SHIM_SERVICE_T *service = (SHIM_SERVICE_T *)handle;
11493 + VCHIQ_ELEMENT_T element = {data, data_size};
11494 + VCHIQ_STATUS_T status;
11496 + (void)msg_handle;
11498 + WARN_ON(flags != VCHI_FLAGS_BLOCK_UNTIL_QUEUED);
11500 + status = vchiq_queue_message(service->handle, &element, 1);
11502 + /* vchiq_queue_message() may return VCHIQ_RETRY, so we need to
11503 + ** implement a retry mechanism since this function is supposed
11504 + ** to block until queued
11506 + while (status == VCHIQ_RETRY) {
11508 + status = vchiq_queue_message(service->handle, &element, 1);
11511 + return vchiq_status_to_vchi(status);
11513 +EXPORT_SYMBOL(vchi_msg_queue);
11515 +/***********************************************************
11516 + * Name: vchi_bulk_queue_receive
11518 + * Arguments: VCHI_BULK_HANDLE_T handle,
11519 + * void *data_dst,
11520 + * const uint32_t data_size,
11521 + * VCHI_FLAGS_T flags
11522 + * void *bulk_handle
11524 + * Description: Routine to setup a rcv buffer
11526 + * Returns: int32_t - success == 0
11528 + ***********************************************************/
11529 +int32_t vchi_bulk_queue_receive(VCHI_SERVICE_HANDLE_T handle,
11531 + uint32_t data_size,
11532 + VCHI_FLAGS_T flags,
11533 + void *bulk_handle)
11535 + SHIM_SERVICE_T *service = (SHIM_SERVICE_T *)handle;
11536 + VCHIQ_BULK_MODE_T mode;
11537 + VCHIQ_STATUS_T status;
11539 + switch ((int)flags) {
11540 + case VCHI_FLAGS_CALLBACK_WHEN_OP_COMPLETE
11541 + | VCHI_FLAGS_BLOCK_UNTIL_QUEUED:
11542 + WARN_ON(!service->callback);
11543 + mode = VCHIQ_BULK_MODE_CALLBACK;
11545 + case VCHI_FLAGS_BLOCK_UNTIL_OP_COMPLETE:
11546 + mode = VCHIQ_BULK_MODE_BLOCKING;
11548 + case VCHI_FLAGS_BLOCK_UNTIL_QUEUED:
11549 + case VCHI_FLAGS_NONE:
11550 + mode = VCHIQ_BULK_MODE_NOCALLBACK;
11553 + WARN(1, "unsupported message\n");
11554 + return vchiq_status_to_vchi(VCHIQ_ERROR);
11557 + status = vchiq_bulk_receive(service->handle, data_dst, data_size,
11558 + bulk_handle, mode);
11560 + /* vchiq_bulk_receive() may return VCHIQ_RETRY, so we need to
11561 + ** implement a retry mechanism since this function is supposed
11562 + ** to block until queued
11564 + while (status == VCHIQ_RETRY) {
11566 + status = vchiq_bulk_receive(service->handle, data_dst,
11567 + data_size, bulk_handle, mode);
11570 + return vchiq_status_to_vchi(status);
11572 +EXPORT_SYMBOL(vchi_bulk_queue_receive);
11574 +/***********************************************************
11575 + * Name: vchi_bulk_queue_transmit
11577 + * Arguments: VCHI_BULK_HANDLE_T handle,
11578 + * const void *data_src,
11579 + * uint32_t data_size,
11580 + * VCHI_FLAGS_T flags,
11581 + * void *bulk_handle
11583 + * Description: Routine to transmit some data
11585 + * Returns: int32_t - success == 0
11587 + ***********************************************************/
11588 +int32_t vchi_bulk_queue_transmit(VCHI_SERVICE_HANDLE_T handle,
11589 + const void *data_src,
11590 + uint32_t data_size,
11591 + VCHI_FLAGS_T flags,
11592 + void *bulk_handle)
11594 + SHIM_SERVICE_T *service = (SHIM_SERVICE_T *)handle;
11595 + VCHIQ_BULK_MODE_T mode;
11596 + VCHIQ_STATUS_T status;
11598 + switch ((int)flags) {
11599 + case VCHI_FLAGS_CALLBACK_WHEN_OP_COMPLETE
11600 + | VCHI_FLAGS_BLOCK_UNTIL_QUEUED:
11601 + WARN_ON(!service->callback);
11602 + mode = VCHIQ_BULK_MODE_CALLBACK;
11604 + case VCHI_FLAGS_BLOCK_UNTIL_DATA_READ:
11605 + case VCHI_FLAGS_BLOCK_UNTIL_OP_COMPLETE:
11606 + mode = VCHIQ_BULK_MODE_BLOCKING;
11608 + case VCHI_FLAGS_BLOCK_UNTIL_QUEUED:
11609 + case VCHI_FLAGS_NONE:
11610 + mode = VCHIQ_BULK_MODE_NOCALLBACK;
11613 + WARN(1, "unsupported message\n");
11614 + return vchiq_status_to_vchi(VCHIQ_ERROR);
11617 + status = vchiq_bulk_transmit(service->handle, data_src, data_size,
11618 + bulk_handle, mode);
11620 + /* vchiq_bulk_transmit() may return VCHIQ_RETRY, so we need to
11621 + ** implement a retry mechanism since this function is supposed
11622 + ** to block until queued
11624 + while (status == VCHIQ_RETRY) {
11626 + status = vchiq_bulk_transmit(service->handle, data_src,
11627 + data_size, bulk_handle, mode);
11630 + return vchiq_status_to_vchi(status);
11632 +EXPORT_SYMBOL(vchi_bulk_queue_transmit);
11634 +/***********************************************************
11635 + * Name: vchi_msg_dequeue
11637 + * Arguments: VCHI_SERVICE_HANDLE_T handle,
11639 + * uint32_t max_data_size_to_read,
11640 + * uint32_t *actual_msg_size
11641 + * VCHI_FLAGS_T flags
11643 + * Description: Routine to dequeue a message into the supplied buffer
11645 + * Returns: int32_t - success == 0
11647 + ***********************************************************/
11648 +int32_t vchi_msg_dequeue(VCHI_SERVICE_HANDLE_T handle,
11650 + uint32_t max_data_size_to_read,
11651 + uint32_t *actual_msg_size,
11652 + VCHI_FLAGS_T flags)
11654 + SHIM_SERVICE_T *service = (SHIM_SERVICE_T *)handle;
11655 + VCHIQ_HEADER_T *header;
11657 + WARN_ON((flags != VCHI_FLAGS_NONE) &&
11658 + (flags != VCHI_FLAGS_BLOCK_UNTIL_OP_COMPLETE));
11660 + if (flags == VCHI_FLAGS_NONE)
11661 + if (vchiu_queue_is_empty(&service->queue))
11664 + header = vchiu_queue_pop(&service->queue);
11666 + memcpy(data, header->data, header->size < max_data_size_to_read ?
11667 + header->size : max_data_size_to_read);
11669 + *actual_msg_size = header->size;
11671 + vchiq_release_message(service->handle, header);
11675 +EXPORT_SYMBOL(vchi_msg_dequeue);
11677 +/***********************************************************
11678 + * Name: vchi_msg_queuev
11680 + * Arguments: VCHI_SERVICE_HANDLE_T handle,
11681 + * VCHI_MSG_VECTOR_T *vector,
11682 + * uint32_t count,
11683 + * VCHI_FLAGS_T flags,
11684 + * void *msg_handle
11686 + * Description: Thin wrapper to queue a message onto a connection
11688 + * Returns: int32_t - success == 0
11690 + ***********************************************************/
11692 +vchiq_static_assert(sizeof(VCHI_MSG_VECTOR_T) == sizeof(VCHIQ_ELEMENT_T));
11693 +vchiq_static_assert(offsetof(VCHI_MSG_VECTOR_T, vec_base) ==
11694 + offsetof(VCHIQ_ELEMENT_T, data));
11695 +vchiq_static_assert(offsetof(VCHI_MSG_VECTOR_T, vec_len) ==
11696 + offsetof(VCHIQ_ELEMENT_T, size));
11698 +int32_t vchi_msg_queuev(VCHI_SERVICE_HANDLE_T handle,
11699 + VCHI_MSG_VECTOR_T *vector,
11701 + VCHI_FLAGS_T flags,
11702 + void *msg_handle)
11704 + SHIM_SERVICE_T *service = (SHIM_SERVICE_T *)handle;
11706 + (void)msg_handle;
11708 + WARN_ON(flags != VCHI_FLAGS_BLOCK_UNTIL_QUEUED);
11710 + return vchiq_status_to_vchi(vchiq_queue_message(service->handle,
11711 + (const VCHIQ_ELEMENT_T *)vector, count));
11713 +EXPORT_SYMBOL(vchi_msg_queuev);
11715 +/***********************************************************
11716 + * Name: vchi_held_msg_release
11718 + * Arguments: VCHI_HELD_MSG_T *message
11720 + * Description: Routine to release a held message (after it has been read with
11723 + * Returns: int32_t - success == 0
11725 + ***********************************************************/
11726 +int32_t vchi_held_msg_release(VCHI_HELD_MSG_T *message)
11728 + vchiq_release_message((VCHIQ_SERVICE_HANDLE_T)message->service,
11729 + (VCHIQ_HEADER_T *)message->message);
11733 +EXPORT_SYMBOL(vchi_held_msg_release);
11735 +/***********************************************************
11736 + * Name: vchi_msg_hold
11738 + * Arguments: VCHI_SERVICE_HANDLE_T handle,
11740 + * uint32_t *msg_size,
11741 + * VCHI_FLAGS_T flags,
11742 + * VCHI_HELD_MSG_T *message_handle
11744 + * Description: Routine to return a pointer to the current message (to allow
11745 + * in place processing). The message is dequeued - don't forget
11746 + * to release the message using vchi_held_msg_release when you're
11749 + * Returns: int32_t - success == 0
11751 + ***********************************************************/
11752 +int32_t vchi_msg_hold(VCHI_SERVICE_HANDLE_T handle,
11754 + uint32_t *msg_size,
11755 + VCHI_FLAGS_T flags,
11756 + VCHI_HELD_MSG_T *message_handle)
11758 + SHIM_SERVICE_T *service = (SHIM_SERVICE_T *)handle;
11759 + VCHIQ_HEADER_T *header;
11761 + WARN_ON((flags != VCHI_FLAGS_NONE) &&
11762 + (flags != VCHI_FLAGS_BLOCK_UNTIL_OP_COMPLETE));
11764 + if (flags == VCHI_FLAGS_NONE)
11765 + if (vchiu_queue_is_empty(&service->queue))
11768 + header = vchiu_queue_pop(&service->queue);
11770 + *data = header->data;
11771 + *msg_size = header->size;
11773 + message_handle->service =
11774 + (struct opaque_vchi_service_t *)service->handle;
11775 + message_handle->message = header;
11779 +EXPORT_SYMBOL(vchi_msg_hold);
11781 +/***********************************************************
11782 + * Name: vchi_initialise
11784 + * Arguments: VCHI_INSTANCE_T *instance_handle
11785 + * VCHI_CONNECTION_T **connections
11786 + * const uint32_t num_connections
11788 + * Description: Initialises the hardware but does not transmit anything
11789 + * When run as a Host App this will be called twice hence the need
11790 + * to malloc the state information
11792 + * Returns: 0 if successful, failure otherwise
11794 + ***********************************************************/
11796 +int32_t vchi_initialise(VCHI_INSTANCE_T *instance_handle)
11798 + VCHIQ_INSTANCE_T instance;
11799 + VCHIQ_STATUS_T status;
11801 + status = vchiq_initialise(&instance);
11803 + *instance_handle = (VCHI_INSTANCE_T)instance;
11805 + return vchiq_status_to_vchi(status);
11807 +EXPORT_SYMBOL(vchi_initialise);
11809 +/***********************************************************
11810 + * Name: vchi_connect
11812 + * Arguments: VCHI_CONNECTION_T **connections
11813 + * const uint32_t num_connections
11814 + * VCHI_INSTANCE_T instance_handle)
11816 + * Description: Starts the command service on each connection,
11817 + * causing INIT messages to be pinged back and forth
11819 + * Returns: 0 if successful, failure otherwise
11821 + ***********************************************************/
11822 +int32_t vchi_connect(VCHI_CONNECTION_T **connections,
11823 + const uint32_t num_connections,
11824 + VCHI_INSTANCE_T instance_handle)
11826 + VCHIQ_INSTANCE_T instance = (VCHIQ_INSTANCE_T)instance_handle;
11828 + (void)connections;
11829 + (void)num_connections;
11831 + return vchiq_connect(instance);
11833 +EXPORT_SYMBOL(vchi_connect);
11836 +/***********************************************************
11837 + * Name: vchi_disconnect
11839 + * Arguments: VCHI_INSTANCE_T instance_handle
11841 + * Description: Stops the command service on each connection,
11842 + * causing DE-INIT messages to be pinged back and forth
11844 + * Returns: 0 if successful, failure otherwise
11846 + ***********************************************************/
11847 +int32_t vchi_disconnect(VCHI_INSTANCE_T instance_handle)
11849 + VCHIQ_INSTANCE_T instance = (VCHIQ_INSTANCE_T)instance_handle;
11850 + return vchiq_status_to_vchi(vchiq_shutdown(instance));
11852 +EXPORT_SYMBOL(vchi_disconnect);
11855 +/***********************************************************
11856 + * Name: vchi_service_open
11857 + * Name: vchi_service_create
11859 + * Arguments: VCHI_INSTANCE_T *instance_handle
11860 + * SERVICE_CREATION_T *setup,
11861 + * VCHI_SERVICE_HANDLE_T *handle
11863 + * Description: Routine to open a service
11865 + * Returns: int32_t - success == 0
11867 + ***********************************************************/
11869 +static VCHIQ_STATUS_T shim_callback(VCHIQ_REASON_T reason,
11870 + VCHIQ_HEADER_T *header, VCHIQ_SERVICE_HANDLE_T handle, void *bulk_user)
11872 + SHIM_SERVICE_T *service =
11873 + (SHIM_SERVICE_T *)VCHIQ_GET_SERVICE_USERDATA(handle);
11875 + if (!service->callback)
11878 + switch (reason) {
11879 + case VCHIQ_MESSAGE_AVAILABLE:
11880 + vchiu_queue_push(&service->queue, header);
11882 + service->callback(service->callback_param,
11883 + VCHI_CALLBACK_MSG_AVAILABLE, NULL);
11888 + case VCHIQ_BULK_TRANSMIT_DONE:
11889 + service->callback(service->callback_param,
11890 + VCHI_CALLBACK_BULK_SENT, bulk_user);
11893 + case VCHIQ_BULK_RECEIVE_DONE:
11894 + service->callback(service->callback_param,
11895 + VCHI_CALLBACK_BULK_RECEIVED, bulk_user);
11898 + case VCHIQ_SERVICE_CLOSED:
11899 + service->callback(service->callback_param,
11900 + VCHI_CALLBACK_SERVICE_CLOSED, NULL);
11903 + case VCHIQ_SERVICE_OPENED:
11904 + /* No equivalent VCHI reason */
11907 + case VCHIQ_BULK_TRANSMIT_ABORTED:
11908 + service->callback(service->callback_param,
11909 + VCHI_CALLBACK_BULK_TRANSMIT_ABORTED,
11913 + case VCHIQ_BULK_RECEIVE_ABORTED:
11914 + service->callback(service->callback_param,
11915 + VCHI_CALLBACK_BULK_RECEIVE_ABORTED,
11920 + WARN(1, "not supported\n");
11925 + vchiq_release_message(service->handle, header);
11927 + return VCHIQ_SUCCESS;
11930 +static SHIM_SERVICE_T *service_alloc(VCHIQ_INSTANCE_T instance,
11931 + SERVICE_CREATION_T *setup)
11933 + SHIM_SERVICE_T *service = kzalloc(sizeof(SHIM_SERVICE_T), GFP_KERNEL);
11938 + if (vchiu_queue_init(&service->queue, 64)) {
11939 + service->callback = setup->callback;
11940 + service->callback_param = setup->callback_param;
11950 +static void service_free(SHIM_SERVICE_T *service)
11953 + vchiu_queue_delete(&service->queue);
11958 +int32_t vchi_service_open(VCHI_INSTANCE_T instance_handle,
11959 + SERVICE_CREATION_T *setup,
11960 + VCHI_SERVICE_HANDLE_T *handle)
11962 + VCHIQ_INSTANCE_T instance = (VCHIQ_INSTANCE_T)instance_handle;
11963 + SHIM_SERVICE_T *service = service_alloc(instance, setup);
11965 + VCHIQ_SERVICE_PARAMS_T params;
11966 + VCHIQ_STATUS_T status;
11968 +	memset(&params, 0, sizeof(params));
11969 + params.fourcc = setup->service_id;
11970 + params.callback = shim_callback;
11971 + params.userdata = service;
11972 + params.version = setup->version.version;
11973 + params.version_min = setup->version.version_min;
11975 +	status = vchiq_open_service(instance, &params,
11976 + &service->handle);
11977 + if (status != VCHIQ_SUCCESS) {
11978 + service_free(service);
11983 + *handle = (VCHI_SERVICE_HANDLE_T)service;
11985 + return (service != NULL) ? 0 : -1;
11987 +EXPORT_SYMBOL(vchi_service_open);
11989 +int32_t vchi_service_create(VCHI_INSTANCE_T instance_handle,
11990 + SERVICE_CREATION_T *setup,
11991 + VCHI_SERVICE_HANDLE_T *handle)
11993 + VCHIQ_INSTANCE_T instance = (VCHIQ_INSTANCE_T)instance_handle;
11994 + SHIM_SERVICE_T *service = service_alloc(instance, setup);
11996 + VCHIQ_SERVICE_PARAMS_T params;
11997 + VCHIQ_STATUS_T status;
11999 +	memset(&params, 0, sizeof(params));
12000 + params.fourcc = setup->service_id;
12001 + params.callback = shim_callback;
12002 + params.userdata = service;
12003 + params.version = setup->version.version;
12004 + params.version_min = setup->version.version_min;
12005 +	status = vchiq_add_service(instance, &params, &service->handle);
12007 + if (status != VCHIQ_SUCCESS) {
12008 + service_free(service);
12013 + *handle = (VCHI_SERVICE_HANDLE_T)service;
12015 + return (service != NULL) ? 0 : -1;
12017 +EXPORT_SYMBOL(vchi_service_create);
12019 +int32_t vchi_service_close(const VCHI_SERVICE_HANDLE_T handle)
12021 + int32_t ret = -1;
12022 + SHIM_SERVICE_T *service = (SHIM_SERVICE_T *)handle;
12024 + VCHIQ_STATUS_T status = vchiq_close_service(service->handle);
12025 + if (status == VCHIQ_SUCCESS) {
12026 + service_free(service);
12030 + ret = vchiq_status_to_vchi(status);
12034 +EXPORT_SYMBOL(vchi_service_close);
12036 +int32_t vchi_service_destroy(const VCHI_SERVICE_HANDLE_T handle)
12038 + int32_t ret = -1;
12039 + SHIM_SERVICE_T *service = (SHIM_SERVICE_T *)handle;
12041 + VCHIQ_STATUS_T status = vchiq_remove_service(service->handle);
12042 + if (status == VCHIQ_SUCCESS) {
12043 + service_free(service);
12047 + ret = vchiq_status_to_vchi(status);
12051 +EXPORT_SYMBOL(vchi_service_destroy);
12053 +int32_t vchi_get_peer_version( const VCHI_SERVICE_HANDLE_T handle, short *peer_version )
12055 + int32_t ret = -1;
12056 + SHIM_SERVICE_T *service = (SHIM_SERVICE_T *)handle;
12059 + VCHIQ_STATUS_T status = vchiq_get_peer_version(service->handle, peer_version);
12060 + ret = vchiq_status_to_vchi( status );
12064 +EXPORT_SYMBOL(vchi_get_peer_version);
12066 +/* ----------------------------------------------------------------------
12067 + * read a uint32_t from buffer.
12068 + * network format is defined to be little endian
12069 + * -------------------------------------------------------------------- */
12071 +vchi_readbuf_uint32(const void *_ptr)
12073 + const unsigned char *ptr = _ptr;
12074 + return ptr[0] | (ptr[1] << 8) | (ptr[2] << 16) | (ptr[3] << 24);
12077 +/* ----------------------------------------------------------------------
12078 + * write a uint32_t to buffer.
12079 + * network format is defined to be little endian
12080 + * -------------------------------------------------------------------- */
12082 +vchi_writebuf_uint32(void *_ptr, uint32_t value)
12084 + unsigned char *ptr = _ptr;
12085 + ptr[0] = (unsigned char)((value >> 0) & 0xFF);
12086 + ptr[1] = (unsigned char)((value >> 8) & 0xFF);
12087 + ptr[2] = (unsigned char)((value >> 16) & 0xFF);
12088 + ptr[3] = (unsigned char)((value >> 24) & 0xFF);
12091 +/* ----------------------------------------------------------------------
12092 + * read a uint16_t from buffer.
12093 + * network format is defined to be little endian
12094 + * -------------------------------------------------------------------- */
12096 +vchi_readbuf_uint16(const void *_ptr)
12098 + const unsigned char *ptr = _ptr;
12099 + return ptr[0] | (ptr[1] << 8);
12102 +/* ----------------------------------------------------------------------
12103 + * write a uint16_t into the buffer.
12104 + * network format is defined to be little endian
12105 + * -------------------------------------------------------------------- */
12107 +vchi_writebuf_uint16(void *_ptr, uint16_t value)
12109 + unsigned char *ptr = _ptr;
12110 + ptr[0] = (value >> 0) & 0xFF;
12111 + ptr[1] = (value >> 8) & 0xFF;
12114 +/***********************************************************
12115 + * Name: vchi_service_use
12117 + * Arguments: const VCHI_SERVICE_HANDLE_T handle
12119 + * Description: Routine to increment refcount on a service
12123 + ***********************************************************/
12124 +int32_t vchi_service_use(const VCHI_SERVICE_HANDLE_T handle)
12126 + int32_t ret = -1;
12127 + SHIM_SERVICE_T *service = (SHIM_SERVICE_T *)handle;
12129 + ret = vchiq_status_to_vchi(vchiq_use_service(service->handle));
12132 +EXPORT_SYMBOL(vchi_service_use);
12134 +/***********************************************************
12135 + * Name: vchi_service_release
12137 + * Arguments: const VCHI_SERVICE_HANDLE_T handle
12139 + * Description: Routine to decrement refcount on a service
12143 + ***********************************************************/
12144 +int32_t vchi_service_release(const VCHI_SERVICE_HANDLE_T handle)
12146 + int32_t ret = -1;
12147 + SHIM_SERVICE_T *service = (SHIM_SERVICE_T *)handle;
12149 + ret = vchiq_status_to_vchi(
12150 + vchiq_release_service(service->handle));
12153 +EXPORT_SYMBOL(vchi_service_release);
12155 +++ b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_util.c
12158 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
12160 + * Redistribution and use in source and binary forms, with or without
12161 + * modification, are permitted provided that the following conditions
12163 + * 1. Redistributions of source code must retain the above copyright
12164 + * notice, this list of conditions, and the following disclaimer,
12165 + * without modification.
12166 + * 2. Redistributions in binary form must reproduce the above copyright
12167 + * notice, this list of conditions and the following disclaimer in the
12168 + * documentation and/or other materials provided with the distribution.
12169 + * 3. The names of the above-listed copyright holders may not be used
12170 + * to endorse or promote products derived from this software without
12171 + * specific prior written permission.
12173 + * ALTERNATIVELY, this software may be distributed under the terms of the
12174 + * GNU General Public License ("GPL") version 2, as published by the Free
12175 + * Software Foundation.
12177 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
12178 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
12179 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
12180 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
12181 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
12182 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
12183 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
12184 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
12185 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
12186 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
12187 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
12190 +#include "vchiq_util.h"
12192 +static inline int is_pow2(int i)
12194 + return i && !(i & (i - 1));
12197 +int vchiu_queue_init(VCHIU_QUEUE_T *queue, int size)
12199 + WARN_ON(!is_pow2(size));
12201 + queue->size = size;
12203 + queue->write = 0;
12205 + sema_init(&queue->pop, 0);
12206 + sema_init(&queue->push, 0);
12208 + queue->storage = kzalloc(size * sizeof(VCHIQ_HEADER_T *), GFP_KERNEL);
12209 + if (queue->storage == NULL) {
12210 + vchiu_queue_delete(queue);
12216 +void vchiu_queue_delete(VCHIU_QUEUE_T *queue)
12218 + if (queue->storage != NULL)
12219 + kfree(queue->storage);
12222 +int vchiu_queue_is_empty(VCHIU_QUEUE_T *queue)
12224 + return queue->read == queue->write;
12227 +int vchiu_queue_is_full(VCHIU_QUEUE_T *queue)
12229 + return queue->write == queue->read + queue->size;
12232 +void vchiu_queue_push(VCHIU_QUEUE_T *queue, VCHIQ_HEADER_T *header)
12234 + while (queue->write == queue->read + queue->size) {
12235 + if (down_interruptible(&queue->pop) != 0) {
12236 + flush_signals(current);
12241 + * Write to queue->storage must be visible after read from
12246 + queue->storage[queue->write & (queue->size - 1)] = header;
12249 + * Write to queue->storage must be visible before write to
12256 + up(&queue->push);
12259 +VCHIQ_HEADER_T *vchiu_queue_peek(VCHIU_QUEUE_T *queue)
12261 + while (queue->write == queue->read) {
12262 + if (down_interruptible(&queue->push) != 0) {
12263 + flush_signals(current);
12267 + up(&queue->push); // We haven't removed anything from the queue.
12270 + * Read from queue->storage must be visible after read from
12275 + return queue->storage[queue->read & (queue->size - 1)];
12278 +VCHIQ_HEADER_T *vchiu_queue_pop(VCHIU_QUEUE_T *queue)
12280 + VCHIQ_HEADER_T *header;
12282 + while (queue->write == queue->read) {
12283 + if (down_interruptible(&queue->push) != 0) {
12284 + flush_signals(current);
12289 + * Read from queue->storage must be visible after read from
12294 + header = queue->storage[queue->read & (queue->size - 1)];
12297 + * Read from queue->storage must be visible before write to
12309 +++ b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_util.h
12312 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
12314 + * Redistribution and use in source and binary forms, with or without
12315 + * modification, are permitted provided that the following conditions
12317 + * 1. Redistributions of source code must retain the above copyright
12318 + * notice, this list of conditions, and the following disclaimer,
12319 + * without modification.
12320 + * 2. Redistributions in binary form must reproduce the above copyright
12321 + * notice, this list of conditions and the following disclaimer in the
12322 + * documentation and/or other materials provided with the distribution.
12323 + * 3. The names of the above-listed copyright holders may not be used
12324 + * to endorse or promote products derived from this software without
12325 + * specific prior written permission.
12327 + * ALTERNATIVELY, this software may be distributed under the terms of the
12328 + * GNU General Public License ("GPL") version 2, as published by the Free
12329 + * Software Foundation.
12331 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
12332 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
12333 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
12334 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
12335 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
12336 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
12337 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
12338 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
12339 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
12340 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
12341 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
12344 +#ifndef VCHIQ_UTIL_H
12345 +#define VCHIQ_UTIL_H
12347 +#include <linux/types.h>
12348 +#include <linux/semaphore.h>
12349 +#include <linux/mutex.h>
12350 +#include <linux/bitops.h>
12351 +#include <linux/kthread.h>
12352 +#include <linux/wait.h>
12353 +#include <linux/vmalloc.h>
12354 +#include <linux/jiffies.h>
12355 +#include <linux/delay.h>
12356 +#include <linux/string.h>
12357 +#include <linux/types.h>
12358 +#include <linux/interrupt.h>
12359 +#include <linux/random.h>
12360 +#include <linux/sched.h>
12361 +#include <linux/ctype.h>
12362 +#include <linux/uaccess.h>
12363 +#include <linux/time.h> /* for time_t */
12364 +#include <linux/slab.h>
12365 +#include <linux/vmalloc.h>
12367 +#include "vchiq_if.h"
12374 + struct semaphore pop;
12375 + struct semaphore push;
12377 + VCHIQ_HEADER_T **storage;
12380 +extern int vchiu_queue_init(VCHIU_QUEUE_T *queue, int size);
12381 +extern void vchiu_queue_delete(VCHIU_QUEUE_T *queue);
12383 +extern int vchiu_queue_is_empty(VCHIU_QUEUE_T *queue);
12384 +extern int vchiu_queue_is_full(VCHIU_QUEUE_T *queue);
12386 +extern void vchiu_queue_push(VCHIU_QUEUE_T *queue, VCHIQ_HEADER_T *header);
12388 +extern VCHIQ_HEADER_T *vchiu_queue_peek(VCHIU_QUEUE_T *queue);
12389 +extern VCHIQ_HEADER_T *vchiu_queue_pop(VCHIU_QUEUE_T *queue);
12393 +++ b/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_version.c
12396 + * Copyright (c) 2010-2012 Broadcom. All rights reserved.
12398 + * Redistribution and use in source and binary forms, with or without
12399 + * modification, are permitted provided that the following conditions
12401 + * 1. Redistributions of source code must retain the above copyright
12402 + * notice, this list of conditions, and the following disclaimer,
12403 + * without modification.
12404 + * 2. Redistributions in binary form must reproduce the above copyright
12405 + * notice, this list of conditions and the following disclaimer in the
12406 + * documentation and/or other materials provided with the distribution.
12407 + * 3. The names of the above-listed copyright holders may not be used
12408 + * to endorse or promote products derived from this software without
12409 + * specific prior written permission.
12411 + * ALTERNATIVELY, this software may be distributed under the terms of the
12412 + * GNU General Public License ("GPL") version 2, as published by the Free
12413 + * Software Foundation.
12415 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
12416 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
12417 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
12418 + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
12419 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
12420 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
12421 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
12422 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
12423 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
12424 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
12425 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
12427 +#include "vchiq_build_info.h"
12428 +#include <linux/broadcom/vc_debug_sym.h>
12430 +VC_DEBUG_DECLARE_STRING_VAR( vchiq_build_hostname, "dc4-arm-01" );
12431 +VC_DEBUG_DECLARE_STRING_VAR( vchiq_build_version, "9245b4c35b99b3870e1f7dc598c5692b3c66a6f0 (tainted)" );
12432 +VC_DEBUG_DECLARE_STRING_VAR( vchiq_build_time, __TIME__ );
12433 +VC_DEBUG_DECLARE_STRING_VAR( vchiq_build_date, __DATE__ );
12435 +const char *vchiq_get_build_hostname( void )
12437 + return vchiq_build_hostname;
12440 +const char *vchiq_get_build_version( void )
12442 + return vchiq_build_version;
12445 +const char *vchiq_get_build_date( void )
12447 + return vchiq_build_date;
12450 +const char *vchiq_get_build_time( void )
12452 + return vchiq_build_time;