/*
 * Copyright (c) 2010-2012 Broadcom. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The names of the above-listed copyright holders may not be used
 *    to endorse or promote products derived from this software without
 *    specific prior written permission.
 *
 * ALTERNATIVELY, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2, as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
 * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
34 #include "vchiq_core.h"
36 #define VCHIQ_SLOT_HANDLER_STACK 8192
38 #define HANDLE_STATE_SHIFT 12
40 #define SLOT_INFO_FROM_INDEX(state, index) (state->slot_info + (index))
41 #define SLOT_DATA_FROM_INDEX(state, index) (state->slot_data + (index))
42 #define SLOT_INDEX_FROM_DATA(state, data) \
43 (((unsigned int)((char *)data - (char *)state->slot_data)) / \
45 #define SLOT_INDEX_FROM_INFO(state, info) \
46 ((unsigned int)(info - state->slot_info))
47 #define SLOT_QUEUE_INDEX_FROM_POS(pos) \
48 ((int)((unsigned int)(pos) / VCHIQ_SLOT_SIZE))
50 #define BULK_INDEX(x) (x & (VCHIQ_NUM_SERVICE_BULKS - 1))
52 #define SRVTRACE_LEVEL(srv) \
53 (((srv) && (srv)->trace) ? VCHIQ_LOG_TRACE : vchiq_core_msg_log_level)
54 #define SRVTRACE_ENABLED(srv, lev) \
55 (((srv) && (srv)->trace) || (vchiq_core_msg_log_level >= (lev)))
57 struct vchiq_open_payload {
64 struct vchiq_openack_payload {
69 QMFLAGS_IS_BLOCKING = (1 << 0),
70 QMFLAGS_NO_MUTEX_LOCK = (1 << 1),
71 QMFLAGS_NO_MUTEX_UNLOCK = (1 << 2)
74 /* we require this for consistency between endpoints */
75 vchiq_static_assert(sizeof(struct vchiq_header) == 8);
76 vchiq_static_assert(IS_POW2(sizeof(struct vchiq_header)));
77 vchiq_static_assert(IS_POW2(VCHIQ_NUM_CURRENT_BULKS));
78 vchiq_static_assert(IS_POW2(VCHIQ_NUM_SERVICE_BULKS));
79 vchiq_static_assert(IS_POW2(VCHIQ_MAX_SERVICES));
80 vchiq_static_assert(VCHIQ_VERSION >= VCHIQ_VERSION_MIN);
82 /* Run time control of log level, based on KERN_XXX level. */
83 int vchiq_core_log_level = VCHIQ_LOG_DEFAULT;
84 int vchiq_core_msg_log_level = VCHIQ_LOG_DEFAULT;
85 int vchiq_sync_log_level = VCHIQ_LOG_DEFAULT;
87 static DEFINE_SPINLOCK(service_spinlock);
88 DEFINE_SPINLOCK(bulk_waiter_spinlock);
89 static DEFINE_SPINLOCK(quota_spinlock);
91 struct vchiq_state *vchiq_states[VCHIQ_MAX_STATES];
92 static unsigned int handle_seq;
/* Printable names for the service states, indexed by VCHIQ_SRVSTATE_*.
 * NOTE(review): entries restored from the upstream driver - this extraction
 * had dropped most initialiser lines; keep in sync with vchiq_core.h. */
static const char *const srvstate_names[] = {
	"FREE",
	"HIDDEN",
	"LISTENING",
	"OPENING",
	"OPEN",
	"OPENSYNC",
	"CLOSESENT",
	"CLOSERECVD",
	"CLOSEWAIT",
	"CLOSED"
};

/* Printable names for callback reasons, indexed by VCHIQ_REASON_T */
static const char *const reason_names[] = {
	"SERVICE_OPENED",
	"SERVICE_CLOSED",
	"MESSAGE_AVAILABLE",
	"BULK_TRANSMIT_DONE",
	"BULK_RECEIVE_DONE",
	"BULK_TRANSMIT_ABORTED",
	"BULK_RECEIVE_ABORTED"
};

/* Printable names for connection states, indexed by VCHIQ_CONNSTATE_T */
static const char *const conn_state_names[] = {
	"DISCONNECTED",
	"CONNECTING",
	"CONNECTED",
	"PAUSING",
	"PAUSE_SENT",
	"PAUSED",
	"RESUMING",
	"PAUSE_TIMEOUT",
	"RESUME_TIMEOUT"
};

static void
release_message_sync(struct vchiq_state *state, struct vchiq_header *header);
132 static const char *msg_type_str(unsigned int msg_type)
135 case VCHIQ_MSG_PADDING: return "PADDING";
136 case VCHIQ_MSG_CONNECT: return "CONNECT";
137 case VCHIQ_MSG_OPEN: return "OPEN";
138 case VCHIQ_MSG_OPENACK: return "OPENACK";
139 case VCHIQ_MSG_CLOSE: return "CLOSE";
140 case VCHIQ_MSG_DATA: return "DATA";
141 case VCHIQ_MSG_BULK_RX: return "BULK_RX";
142 case VCHIQ_MSG_BULK_TX: return "BULK_TX";
143 case VCHIQ_MSG_BULK_RX_DONE: return "BULK_RX_DONE";
144 case VCHIQ_MSG_BULK_TX_DONE: return "BULK_TX_DONE";
145 case VCHIQ_MSG_PAUSE: return "PAUSE";
146 case VCHIQ_MSG_RESUME: return "RESUME";
147 case VCHIQ_MSG_REMOTE_USE: return "REMOTE_USE";
148 case VCHIQ_MSG_REMOTE_RELEASE: return "REMOTE_RELEASE";
149 case VCHIQ_MSG_REMOTE_USE_ACTIVE: return "REMOTE_USE_ACTIVE";
155 vchiq_set_service_state(struct vchiq_service *service, int newstate)
157 vchiq_log_info(vchiq_core_log_level, "%d: srv:%d %s->%s",
158 service->state->id, service->localport,
159 srvstate_names[service->srvstate],
160 srvstate_names[newstate]);
161 service->srvstate = newstate;
164 struct vchiq_service *
165 find_service_by_handle(VCHIQ_SERVICE_HANDLE_T handle)
167 struct vchiq_service *service;
169 spin_lock(&service_spinlock);
170 service = handle_to_service(handle);
171 if (service && (service->srvstate != VCHIQ_SRVSTATE_FREE) &&
172 (service->handle == handle)) {
173 WARN_ON(service->ref_count == 0);
174 service->ref_count++;
177 spin_unlock(&service_spinlock);
180 vchiq_log_info(vchiq_core_log_level,
181 "Invalid service handle 0x%x", handle);
186 struct vchiq_service *
187 find_service_by_port(struct vchiq_state *state, int localport)
189 struct vchiq_service *service = NULL;
191 if ((unsigned int)localport <= VCHIQ_PORT_MAX) {
192 spin_lock(&service_spinlock);
193 service = state->services[localport];
194 if (service && (service->srvstate != VCHIQ_SRVSTATE_FREE)) {
195 WARN_ON(service->ref_count == 0);
196 service->ref_count++;
199 spin_unlock(&service_spinlock);
203 vchiq_log_info(vchiq_core_log_level,
204 "Invalid port %d", localport);
209 struct vchiq_service *
210 find_service_for_instance(VCHIQ_INSTANCE_T instance,
211 VCHIQ_SERVICE_HANDLE_T handle)
213 struct vchiq_service *service;
215 spin_lock(&service_spinlock);
216 service = handle_to_service(handle);
217 if (service && (service->srvstate != VCHIQ_SRVSTATE_FREE) &&
218 (service->handle == handle) &&
219 (service->instance == instance)) {
220 WARN_ON(service->ref_count == 0);
221 service->ref_count++;
224 spin_unlock(&service_spinlock);
227 vchiq_log_info(vchiq_core_log_level,
228 "Invalid service handle 0x%x", handle);
233 struct vchiq_service *
234 find_closed_service_for_instance(VCHIQ_INSTANCE_T instance,
235 VCHIQ_SERVICE_HANDLE_T handle)
237 struct vchiq_service *service;
239 spin_lock(&service_spinlock);
240 service = handle_to_service(handle);
242 ((service->srvstate == VCHIQ_SRVSTATE_FREE) ||
243 (service->srvstate == VCHIQ_SRVSTATE_CLOSED)) &&
244 (service->handle == handle) &&
245 (service->instance == instance)) {
246 WARN_ON(service->ref_count == 0);
247 service->ref_count++;
250 spin_unlock(&service_spinlock);
253 vchiq_log_info(vchiq_core_log_level,
254 "Invalid service handle 0x%x", handle);
259 struct vchiq_service *
260 next_service_by_instance(struct vchiq_state *state, VCHIQ_INSTANCE_T instance,
263 struct vchiq_service *service = NULL;
266 spin_lock(&service_spinlock);
267 while (idx < state->unused_service) {
268 struct vchiq_service *srv = state->services[idx++];
270 if (srv && (srv->srvstate != VCHIQ_SRVSTATE_FREE) &&
271 (srv->instance == instance)) {
273 WARN_ON(service->ref_count == 0);
274 service->ref_count++;
278 spin_unlock(&service_spinlock);
286 lock_service(struct vchiq_service *service)
288 spin_lock(&service_spinlock);
291 WARN_ON(service->ref_count == 0);
292 service->ref_count++;
294 spin_unlock(&service_spinlock);
298 unlock_service(struct vchiq_service *service)
300 spin_lock(&service_spinlock);
302 WARN(1, "%s: service is NULL\n", __func__);
305 if (!service->ref_count) {
306 WARN(1, "%s: ref_count is zero\n", __func__);
309 service->ref_count--;
310 if (!service->ref_count) {
311 struct vchiq_state *state = service->state;
313 WARN_ON(service->srvstate != VCHIQ_SRVSTATE_FREE);
314 state->services[service->localport] = NULL;
319 spin_unlock(&service_spinlock);
321 if (service && service->userdata_term)
322 service->userdata_term(service->base.userdata);
328 vchiq_get_client_id(VCHIQ_SERVICE_HANDLE_T handle)
330 struct vchiq_service *service = find_service_by_handle(handle);
333 id = service ? service->client_id : 0;
335 unlock_service(service);
341 vchiq_get_service_userdata(VCHIQ_SERVICE_HANDLE_T handle)
343 struct vchiq_service *service = handle_to_service(handle);
345 return service ? service->base.userdata : NULL;
349 vchiq_get_service_fourcc(VCHIQ_SERVICE_HANDLE_T handle)
351 struct vchiq_service *service = handle_to_service(handle);
353 return service ? service->base.fourcc : 0;
357 mark_service_closing_internal(struct vchiq_service *service, int sh_thread)
359 struct vchiq_state *state = service->state;
360 struct vchiq_service_quota *service_quota;
362 service->closing = 1;
364 /* Synchronise with other threads. */
365 mutex_lock(&state->recycle_mutex);
366 mutex_unlock(&state->recycle_mutex);
367 if (!sh_thread || (state->conn_state != VCHIQ_CONNSTATE_PAUSE_SENT)) {
368 /* If we're pausing then the slot_mutex is held until resume
369 * by the slot handler. Therefore don't try to acquire this
370 * mutex if we're the slot handler and in the pause sent state.
371 * We don't need to in this case anyway. */
372 mutex_lock(&state->slot_mutex);
373 mutex_unlock(&state->slot_mutex);
376 /* Unblock any sending thread. */
377 service_quota = &state->service_quotas[service->localport];
378 complete(&service_quota->quota_event);
/* Mark a service as closing from a non-slot-handler context. */
static void
mark_service_closing(struct vchiq_service *service)
{
	mark_service_closing_internal(service, 0);
}
387 static inline VCHIQ_STATUS_T
388 make_service_callback(struct vchiq_service *service, VCHIQ_REASON_T reason,
389 struct vchiq_header *header, void *bulk_userdata)
391 VCHIQ_STATUS_T status;
393 vchiq_log_trace(vchiq_core_log_level, "%d: callback:%d (%s, %pK, %pK)",
394 service->state->id, service->localport, reason_names[reason],
395 header, bulk_userdata);
396 status = service->base.callback(reason, header, service->handle,
398 if (status == VCHIQ_ERROR) {
399 vchiq_log_warning(vchiq_core_log_level,
400 "%d: ignoring ERROR from callback to service %x",
401 service->state->id, service->handle);
402 status = VCHIQ_SUCCESS;
408 vchiq_set_conn_state(struct vchiq_state *state, VCHIQ_CONNSTATE_T newstate)
410 VCHIQ_CONNSTATE_T oldstate = state->conn_state;
412 vchiq_log_info(vchiq_core_log_level, "%d: %s->%s", state->id,
413 conn_state_names[oldstate],
414 conn_state_names[newstate]);
415 state->conn_state = newstate;
416 vchiq_platform_conn_state_changed(state, oldstate, newstate);
420 remote_event_create(wait_queue_head_t *wq, struct remote_event *event)
423 /* Don't clear the 'fired' flag because it may already have been set
424 ** by the other side. */
425 init_waitqueue_head(wq);
/* Wait (killably) until the remote side fires this event.
 * NOTE(review): this extraction has dropped most of the function body
 * (the 'fired' fast path, arming/disarming and the return paths) - only
 * the signature and the killable wait remain; restore from upstream
 * vchiq_core.c before building. */
429 remote_event_wait(wait_queue_head_t *wq, struct remote_event *event)
434 if (wait_event_killable(*wq, event->fired)) {
447 remote_event_signal_local(wait_queue_head_t *wq, struct remote_event *event)
454 remote_event_poll(wait_queue_head_t *wq, struct remote_event *event)
456 if (event->fired && event->armed)
457 remote_event_signal_local(wq, event);
461 remote_event_pollall(struct vchiq_state *state)
463 remote_event_poll(&state->sync_trigger_event, &state->local->sync_trigger);
464 remote_event_poll(&state->sync_release_event, &state->local->sync_release);
465 remote_event_poll(&state->trigger_event, &state->local->trigger);
466 remote_event_poll(&state->recycle_event, &state->local->recycle);
469 /* Round up message sizes so that any space at the end of a slot is always big
470 ** enough for a header. This relies on header size being a power of two, which
471 ** has been verified earlier by a static assertion. */
474 calc_stride(size_t size)
476 /* Allow room for the header */
477 size += sizeof(struct vchiq_header);
480 return (size + sizeof(struct vchiq_header) - 1) &
481 ~(sizeof(struct vchiq_header) - 1);
484 /* Called by the slot handler thread */
485 static struct vchiq_service *
486 get_listening_service(struct vchiq_state *state, int fourcc)
490 WARN_ON(fourcc == VCHIQ_FOURCC_INVALID);
492 for (i = 0; i < state->unused_service; i++) {
493 struct vchiq_service *service = state->services[i];
496 (service->public_fourcc == fourcc) &&
497 ((service->srvstate == VCHIQ_SRVSTATE_LISTENING) ||
498 ((service->srvstate == VCHIQ_SRVSTATE_OPEN) &&
499 (service->remoteport == VCHIQ_PORT_FREE)))) {
500 lock_service(service);
508 /* Called by the slot handler thread */
509 static struct vchiq_service *
510 get_connected_service(struct vchiq_state *state, unsigned int port)
514 for (i = 0; i < state->unused_service; i++) {
515 struct vchiq_service *service = state->services[i];
517 if (service && (service->srvstate == VCHIQ_SRVSTATE_OPEN)
518 && (service->remoteport == port)) {
519 lock_service(service);
527 request_poll(struct vchiq_state *state, struct vchiq_service *service,
534 value = atomic_read(&service->poll_flags);
535 } while (atomic_cmpxchg(&service->poll_flags, value,
536 value | (1 << poll_type)) != value);
539 value = atomic_read(&state->poll_services[
540 service->localport>>5]);
541 } while (atomic_cmpxchg(
542 &state->poll_services[service->localport>>5],
543 value, value | (1 << (service->localport & 0x1f)))
547 state->poll_needed = 1;
550 /* ... and ensure the slot handler runs. */
551 remote_event_signal_local(&state->trigger_event, &state->local->trigger);
/* Reserve 'space' bytes in the transmit stream, padding out the current slot
 * and (if blocking) waiting for a free slot when necessary. Returns a pointer
 * to the reserved header, or NULL if no space is available.
 * NOTE(review): this extraction has dropped lines throughout the body
 * (opening brace, several control-flow lines, local declarations) - restore
 * from upstream vchiq_core.c before building. */
554 /* Called from queue_message, by the slot handler and application threads,
555 ** with slot_mutex held */
556 static struct vchiq_header *
557 reserve_space(struct vchiq_state *state, size_t space, int is_blocking)
559 struct vchiq_shared_state *local = state->local;
560 int tx_pos = state->local_tx_pos;
561 int slot_space = VCHIQ_SLOT_SIZE - (tx_pos & VCHIQ_SLOT_MASK);
563 if (space > slot_space) {
564 struct vchiq_header *header;
565 /* Fill the remaining space with padding */
566 WARN_ON(state->tx_data == NULL);
567 header = (struct vchiq_header *)
568 (state->tx_data + (tx_pos & VCHIQ_SLOT_MASK));
569 header->msgid = VCHIQ_MSGID_PADDING;
570 header->size = slot_space - sizeof(struct vchiq_header);
572 tx_pos += slot_space;
575 /* If necessary, get the next slot. */
576 if ((tx_pos & VCHIQ_SLOT_MASK) == 0) {
579 /* If there is no free slot... */
581 if (!try_wait_for_completion(&state->slot_available_event)) {
582 /* ...wait for one. */
584 VCHIQ_STATS_INC(state, slot_stalls);
586 /* But first, flush through the last slot. */
587 state->local_tx_pos = tx_pos;
588 local->tx_pos = tx_pos;
589 remote_event_signal(&state->remote->trigger);
592 (wait_for_completion_killable(
593 &state->slot_available_event)))
594 return NULL; /* No space available */
597 if (tx_pos == (state->slot_queue_available * VCHIQ_SLOT_SIZE)) {
598 complete(&state->slot_available_event);
599 pr_warn("%s: invalid tx_pos: %d\n", __func__, tx_pos);
603 slot_index = local->slot_queue[
604 SLOT_QUEUE_INDEX_FROM_POS(tx_pos) &
605 VCHIQ_SLOT_QUEUE_MASK];
607 (char *)SLOT_DATA_FROM_INDEX(state, slot_index);
610 state->local_tx_pos = tx_pos + space;
612 return (struct vchiq_header *)(state->tx_data +
613 (tx_pos & VCHIQ_SLOT_MASK));
/* Walk the slots recycled by the peer, decrementing message/slot/data quota
 * use counts for every DATA message found and signalling any threads blocked
 * on those quotas, then return the slots to the available queue.
 * NOTE(review): this extraction has dropped lines (braces, memory barriers,
 * some conditionals) and corrupted '&quota_spinlock' into '"a_spinlock' in
 * the spin_lock/spin_unlock calls - restore from upstream vchiq_core.c. */
616 /* Called by the recycle thread. */
618 process_free_queue(struct vchiq_state *state, BITSET_T *service_found,
621 struct vchiq_shared_state *local = state->local;
622 int slot_queue_available;
624 /* Find slots which have been freed by the other side, and return them
625 ** to the available queue. */
626 slot_queue_available = state->slot_queue_available;
629 * Use a memory barrier to ensure that any state that may have been
630 * modified by another thread is not masked by stale prefetched
635 while (slot_queue_available != local->slot_queue_recycle) {
637 int slot_index = local->slot_queue[slot_queue_available++ &
638 VCHIQ_SLOT_QUEUE_MASK];
639 char *data = (char *)SLOT_DATA_FROM_INDEX(state, slot_index);
643 * Beware of the address dependency - data is calculated
644 * using an index written by the other side.
648 vchiq_log_trace(vchiq_core_log_level, "%d: pfq %d=%pK %x %x",
649 state->id, slot_index, data,
650 local->slot_queue_recycle, slot_queue_available);
652 /* Initialise the bitmask for services which have used this
654 memset(service_found, 0, length);
658 while (pos < VCHIQ_SLOT_SIZE) {
659 struct vchiq_header *header =
660 (struct vchiq_header *)(data + pos);
661 int msgid = header->msgid;
663 if (VCHIQ_MSG_TYPE(msgid) == VCHIQ_MSG_DATA) {
664 int port = VCHIQ_MSG_SRCPORT(msgid);
665 struct vchiq_service_quota *service_quota =
666 &state->service_quotas[port];
669 spin_lock(&quota_spinlock);
670 count = service_quota->message_use_count;
672 service_quota->message_use_count =
674 spin_unlock(&quota_spinlock);
676 if (count == service_quota->message_quota)
677 /* Signal the service that it
678 ** has dropped below its quota
680 complete(&service_quota->quota_event);
681 else if (count == 0) {
682 vchiq_log_error(vchiq_core_log_level,
683 "service %d message_use_count=%d (header %pK, msgid %x, header->msgid %x, header->size %x)",
685 service_quota->message_use_count,
686 header, msgid, header->msgid,
688 WARN(1, "invalid message use count\n");
690 if (!BITSET_IS_SET(service_found, port)) {
691 /* Set the found bit for this service */
692 BITSET_SET(service_found, port);
694 spin_lock(&quota_spinlock);
695 count = service_quota->slot_use_count;
697 service_quota->slot_use_count =
699 spin_unlock(&quota_spinlock);
702 /* Signal the service in case
703 ** it has dropped below its
705 complete(&service_quota->quota_event);
707 vchiq_core_log_level,
708 "%d: pfq:%d %x@%pK - slot_use->%d",
710 header->size, header,
714 vchiq_core_log_level,
715 "service %d slot_use_count=%d (header %pK, msgid %x, header->msgid %x, header->size %x)",
717 msgid, header->msgid,
719 WARN(1, "bad slot use count\n");
726 pos += calc_stride(header->size);
727 if (pos > VCHIQ_SLOT_SIZE) {
728 vchiq_log_error(vchiq_core_log_level,
729 "pfq - pos %x: header %pK, msgid %x, header->msgid %x, header->size %x",
730 pos, header, msgid, header->msgid,
732 WARN(1, "invalid slot position\n");
739 spin_lock(&quota_spinlock);
740 count = state->data_use_count;
742 state->data_use_count =
744 spin_unlock(&quota_spinlock);
745 if (count == state->data_quota)
746 complete(&state->data_quota_event);
750 * Don't allow the slot to be reused until we are no
751 * longer interested in it.
755 state->slot_queue_available = slot_queue_available;
756 complete(&state->slot_available_event);
/* Copy callback that simply memcpy()s from the source buffer ('context'). */
static ssize_t
memcpy_copy_callback(
	void *context, void *dest,
	size_t offset, size_t maxsize)
{
	memcpy((char *)dest + offset, (const char *)context + offset, maxsize);
	return maxsize;
}

/* Fill 'dest' with 'size' bytes produced by repeated calls to
 * 'copy_callback'. Returns the number of bytes copied, a negative value
 * propagated from the callback, or -EIO if the callback makes no progress
 * or returns more than was requested. */
static ssize_t
copy_message_data(
	ssize_t (*copy_callback)(void *context, void *dest,
				 size_t offset, size_t maxsize),
	void *context,
	void *dest,
	size_t size)
{
	size_t pos = 0;

	while (pos < size) {
		size_t max_bytes = size - pos;
		ssize_t callback_result;

		callback_result = copy_callback(context,
						(char *)dest + pos,
						pos, max_bytes);

		/* Propagate callback errors verbatim */
		if (callback_result < 0)
			return callback_result;

		/* No progress or an overrun is an I/O error; the cast avoids
		 * a signed/unsigned comparison (callback_result >= 0 here) */
		if (!callback_result ||
		    (size_t)callback_result > max_bytes)
			return -EIO;

		pos += callback_result;
	}

	return pos;
}
/* Queue a message into the transmit stream. For DATA messages this enforces
 * the per-service message/slot quotas and the state-wide data quota,
 * blocking (killably) until quota is available; control messages bypass the
 * quota checks. On success the header is filled, made visible to the peer,
 * and the peer's trigger event is signalled.
 * NOTE(review): this extraction has dropped many lines (braces, 'return
 * VCHIQ_RETRY' paths, local declarations, barrier calls) and corrupted
 * '&quota_spinlock' into '"a_spinlock' - restore from upstream
 * vchiq_core.c before building. */
802 /* Called by the slot handler and application threads */
803 static VCHIQ_STATUS_T
804 queue_message(struct vchiq_state *state, struct vchiq_service *service,
806 ssize_t (*copy_callback)(void *context, void *dest,
807 size_t offset, size_t maxsize),
808 void *context, size_t size, int flags)
810 struct vchiq_shared_state *local;
811 struct vchiq_service_quota *service_quota = NULL;
812 struct vchiq_header *header;
813 int type = VCHIQ_MSG_TYPE(msgid);
817 local = state->local;
819 stride = calc_stride(size);
821 WARN_ON(!(stride <= VCHIQ_SLOT_SIZE));
823 if (!(flags & QMFLAGS_NO_MUTEX_LOCK) &&
824 (mutex_lock_killable(&state->slot_mutex) != 0))
827 if (type == VCHIQ_MSG_DATA) {
831 WARN(1, "%s: service is NULL\n", __func__);
832 mutex_unlock(&state->slot_mutex);
836 WARN_ON((flags & (QMFLAGS_NO_MUTEX_LOCK |
837 QMFLAGS_NO_MUTEX_UNLOCK)) != 0);
839 if (service->closing) {
840 /* The service has been closed */
841 mutex_unlock(&state->slot_mutex);
845 service_quota = &state->service_quotas[service->localport];
847 spin_lock(&quota_spinlock);
849 /* Ensure this service doesn't use more than its quota of
850 ** messages or slots */
851 tx_end_index = SLOT_QUEUE_INDEX_FROM_POS(
852 state->local_tx_pos + stride - 1);
854 /* Ensure data messages don't use more than their quota of
856 while ((tx_end_index != state->previous_data_index) &&
857 (state->data_use_count == state->data_quota)) {
858 VCHIQ_STATS_INC(state, data_stalls);
859 spin_unlock(&quota_spinlock);
860 mutex_unlock(&state->slot_mutex);
862 if (wait_for_completion_killable(
863 &state->data_quota_event))
866 mutex_lock(&state->slot_mutex);
867 spin_lock(&quota_spinlock);
868 tx_end_index = SLOT_QUEUE_INDEX_FROM_POS(
869 state->local_tx_pos + stride - 1);
870 if ((tx_end_index == state->previous_data_index) ||
871 (state->data_use_count < state->data_quota)) {
872 /* Pass the signal on to other waiters */
873 complete(&state->data_quota_event);
878 while ((service_quota->message_use_count ==
879 service_quota->message_quota) ||
880 ((tx_end_index != service_quota->previous_tx_index) &&
881 (service_quota->slot_use_count ==
882 service_quota->slot_quota))) {
883 spin_unlock(&quota_spinlock);
884 vchiq_log_trace(vchiq_core_log_level,
885 "%d: qm:%d %s,%zx - quota stall "
887 state->id, service->localport,
888 msg_type_str(type), size,
889 service_quota->message_use_count,
890 service_quota->slot_use_count);
891 VCHIQ_SERVICE_STATS_INC(service, quota_stalls);
892 mutex_unlock(&state->slot_mutex);
893 if (wait_for_completion_killable(
894 &service_quota->quota_event))
896 if (service->closing)
898 if (mutex_lock_killable(&state->slot_mutex) != 0)
900 if (service->srvstate != VCHIQ_SRVSTATE_OPEN) {
901 /* The service has been closed */
902 mutex_unlock(&state->slot_mutex);
905 spin_lock(&quota_spinlock);
906 tx_end_index = SLOT_QUEUE_INDEX_FROM_POS(
907 state->local_tx_pos + stride - 1);
910 spin_unlock(&quota_spinlock);
913 header = reserve_space(state, stride, flags & QMFLAGS_IS_BLOCKING);
917 VCHIQ_SERVICE_STATS_INC(service, slot_stalls);
918 /* In the event of a failure, return the mutex to the
920 if (!(flags & QMFLAGS_NO_MUTEX_LOCK))
921 mutex_unlock(&state->slot_mutex);
925 if (type == VCHIQ_MSG_DATA) {
926 ssize_t callback_result;
930 vchiq_log_info(vchiq_core_log_level,
931 "%d: qm %s@%pK,%zx (%d->%d)",
932 state->id, msg_type_str(VCHIQ_MSG_TYPE(msgid)),
933 header, size, VCHIQ_MSG_SRCPORT(msgid),
934 VCHIQ_MSG_DSTPORT(msgid));
936 WARN_ON((flags & (QMFLAGS_NO_MUTEX_LOCK |
937 QMFLAGS_NO_MUTEX_UNLOCK)) != 0);
940 copy_message_data(copy_callback, context,
943 if (callback_result < 0) {
944 mutex_unlock(&state->slot_mutex);
945 VCHIQ_SERVICE_STATS_INC(service,
950 if (SRVTRACE_ENABLED(service,
952 vchiq_log_dump_mem("Sent", 0,
955 (size_t)callback_result));
957 spin_lock(&quota_spinlock);
958 service_quota->message_use_count++;
961 SLOT_QUEUE_INDEX_FROM_POS(state->local_tx_pos - 1);
963 /* If this transmission can't fit in the last slot used by any
964 ** service, the data_use_count must be increased. */
965 if (tx_end_index != state->previous_data_index) {
966 state->previous_data_index = tx_end_index;
967 state->data_use_count++;
970 /* If this isn't the same slot last used by this service,
971 ** the service's slot_use_count must be increased. */
972 if (tx_end_index != service_quota->previous_tx_index) {
973 service_quota->previous_tx_index = tx_end_index;
974 slot_use_count = ++service_quota->slot_use_count;
979 spin_unlock(&quota_spinlock);
982 vchiq_log_trace(vchiq_core_log_level,
983 "%d: qm:%d %s,%zx - slot_use->%d (hdr %p)",
984 state->id, service->localport,
985 msg_type_str(VCHIQ_MSG_TYPE(msgid)), size,
986 slot_use_count, header);
988 VCHIQ_SERVICE_STATS_INC(service, ctrl_tx_count);
989 VCHIQ_SERVICE_STATS_ADD(service, ctrl_tx_bytes, size);
991 vchiq_log_info(vchiq_core_log_level,
992 "%d: qm %s@%pK,%zx (%d->%d)", state->id,
993 msg_type_str(VCHIQ_MSG_TYPE(msgid)),
994 header, size, VCHIQ_MSG_SRCPORT(msgid),
995 VCHIQ_MSG_DSTPORT(msgid));
997 /* It is assumed for now that this code path
998 * only happens from calls inside this file.
1000 * External callers are through the vchiq_queue_message
1001 * path which always sets the type to be VCHIQ_MSG_DATA
1003 * At first glance this appears to be correct but
1004 * more review is needed.
1006 copy_message_data(copy_callback, context,
1007 header->data, size);
1009 VCHIQ_STATS_INC(state, ctrl_tx_count);
1012 header->msgid = msgid;
1013 header->size = size;
1018 svc_fourcc = service
1019 ? service->base.fourcc
1020 : VCHIQ_MAKE_FOURCC('?', '?', '?', '?');
1022 vchiq_log_info(SRVTRACE_LEVEL(service),
1023 "Sent Msg %s(%u) to %c%c%c%c s:%u d:%d len:%zu",
1024 msg_type_str(VCHIQ_MSG_TYPE(msgid)),
1025 VCHIQ_MSG_TYPE(msgid),
1026 VCHIQ_FOURCC_AS_4CHARS(svc_fourcc),
1027 VCHIQ_MSG_SRCPORT(msgid),
1028 VCHIQ_MSG_DSTPORT(msgid),
1032 /* Make sure the new header is visible to the peer. */
1035 /* Make the new tx_pos visible to the peer. */
1036 local->tx_pos = state->local_tx_pos;
1039 if (service && (type == VCHIQ_MSG_CLOSE))
1040 vchiq_set_service_state(service, VCHIQ_SRVSTATE_CLOSESENT);
1042 if (!(flags & QMFLAGS_NO_MUTEX_UNLOCK))
1043 mutex_unlock(&state->slot_mutex);
1045 remote_event_signal(&state->remote->trigger);
1047 return VCHIQ_SUCCESS;
/* Queue a message through the synchronous (single-slot) channel: wait for
 * the peer to release the sync slot, copy the payload in, then signal the
 * peer's sync_trigger. The sync_mutex is held across the call and released
 * unless the message is a PAUSE.
 * NOTE(review): this extraction has dropped lines (braces, returns, local
 * declarations). Also note the error path at original line 1095 unlocks
 * state->slot_mutex although this function takes sync_mutex - this is a
 * known upstream bug, fixed later ("unlock the sync mutex, not the slot
 * mutex"); confirm against current vchiq_core.c. */
1050 /* Called by the slot handler and application threads */
1051 static VCHIQ_STATUS_T
1052 queue_message_sync(struct vchiq_state *state, struct vchiq_service *service,
1054 ssize_t (*copy_callback)(void *context, void *dest,
1055 size_t offset, size_t maxsize),
1056 void *context, int size, int is_blocking)
1058 struct vchiq_shared_state *local;
1059 struct vchiq_header *header;
1060 ssize_t callback_result;
1062 local = state->local;
1064 if ((VCHIQ_MSG_TYPE(msgid) != VCHIQ_MSG_RESUME) &&
1065 (mutex_lock_killable(&state->sync_mutex) != 0))
1068 remote_event_wait(&state->sync_release_event, &local->sync_release);
1072 header = (struct vchiq_header *)SLOT_DATA_FROM_INDEX(state,
1076 int oldmsgid = header->msgid;
1078 if (oldmsgid != VCHIQ_MSGID_PADDING)
1079 vchiq_log_error(vchiq_core_log_level,
1080 "%d: qms - msgid %x, not PADDING",
1081 state->id, oldmsgid);
1084 vchiq_log_info(vchiq_sync_log_level,
1085 "%d: qms %s@%pK,%x (%d->%d)", state->id,
1086 msg_type_str(VCHIQ_MSG_TYPE(msgid)),
1087 header, size, VCHIQ_MSG_SRCPORT(msgid),
1088 VCHIQ_MSG_DSTPORT(msgid));
1091 copy_message_data(copy_callback, context,
1092 header->data, size);
1094 if (callback_result < 0) {
1095 mutex_unlock(&state->slot_mutex);
1096 VCHIQ_SERVICE_STATS_INC(service,
1102 if (SRVTRACE_ENABLED(service,
1104 vchiq_log_dump_mem("Sent", 0,
1107 (size_t)callback_result));
1109 VCHIQ_SERVICE_STATS_INC(service, ctrl_tx_count);
1110 VCHIQ_SERVICE_STATS_ADD(service, ctrl_tx_bytes, size);
1112 VCHIQ_STATS_INC(state, ctrl_tx_count);
1115 header->size = size;
1116 header->msgid = msgid;
1118 if (vchiq_sync_log_level >= VCHIQ_LOG_TRACE) {
1121 svc_fourcc = service
1122 ? service->base.fourcc
1123 : VCHIQ_MAKE_FOURCC('?', '?', '?', '?');
1125 vchiq_log_trace(vchiq_sync_log_level,
1126 "Sent Sync Msg %s(%u) to %c%c%c%c s:%u d:%d len:%d",
1127 msg_type_str(VCHIQ_MSG_TYPE(msgid)),
1128 VCHIQ_MSG_TYPE(msgid),
1129 VCHIQ_FOURCC_AS_4CHARS(svc_fourcc),
1130 VCHIQ_MSG_SRCPORT(msgid),
1131 VCHIQ_MSG_DSTPORT(msgid),
1135 remote_event_signal(&state->remote->sync_trigger);
1137 if (VCHIQ_MSG_TYPE(msgid) != VCHIQ_MSG_PAUSE)
1138 mutex_unlock(&state->sync_mutex);
1140 return VCHIQ_SUCCESS;
1144 claim_slot(struct vchiq_slot_info *slot)
/* Release a claimed message in 'slot_info'. When the release count catches
 * up with the use count, the slot is appended to the peer's recycle queue
 * and the peer's recycle event is signalled.
 * NOTE(review): this extraction has dropped lines (return type, braces, the
 * claimed-header guard, barrier calls) - restore from upstream
 * vchiq_core.c before building. */
1150 release_slot(struct vchiq_state *state, struct vchiq_slot_info *slot_info,
1151 struct vchiq_header *header, struct vchiq_service *service)
1155 mutex_lock(&state->recycle_mutex);
1158 int msgid = header->msgid;
1160 if (((msgid & VCHIQ_MSGID_CLAIMED) == 0) ||
1161 (service && service->closing)) {
1162 mutex_unlock(&state->recycle_mutex);
1166 /* Rewrite the message header to prevent a double
1168 header->msgid = msgid & ~VCHIQ_MSGID_CLAIMED;
1171 release_count = slot_info->release_count;
1172 slot_info->release_count = ++release_count;
1174 if (release_count == slot_info->use_count) {
1175 int slot_queue_recycle;
1176 /* Add to the freed queue */
1178 /* A read barrier is necessary here to prevent speculative
1179 ** fetches of remote->slot_queue_recycle from overtaking the
1183 slot_queue_recycle = state->remote->slot_queue_recycle;
1184 state->remote->slot_queue[slot_queue_recycle &
1185 VCHIQ_SLOT_QUEUE_MASK] =
1186 SLOT_INDEX_FROM_INFO(state, slot_info);
1187 state->remote->slot_queue_recycle = slot_queue_recycle + 1;
1188 vchiq_log_info(vchiq_core_log_level,
1189 "%d: %s %d - recycle->%x", state->id, __func__,
1190 SLOT_INDEX_FROM_INFO(state, slot_info),
1191 state->remote->slot_queue_recycle);
1193 /* A write barrier is necessary, but remote_event_signal
1195 remote_event_signal(&state->remote->recycle);
1198 mutex_unlock(&state->recycle_mutex);
/* Deliver completion notifications for finished bulk transfers on 'queue':
 * update stats, wake blocking waiters, and issue service callbacks for
 * callback-mode bulks. If a callback asks for a retry, a TX/RX-notify poll
 * is re-requested so the notification is reattempted later.
 * NOTE(review): this extraction has dropped lines (parameter list tail,
 * braces, several stats/conditional lines) - restore from upstream
 * vchiq_core.c before building. */
1201 /* Called by the slot handler - don't hold the bulk mutex */
1202 static VCHIQ_STATUS_T
1203 notify_bulks(struct vchiq_service *service, struct vchiq_bulk_queue *queue,
1206 VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
1208 vchiq_log_trace(vchiq_core_log_level,
1209 "%d: nb:%d %cx - p=%x rn=%x r=%x",
1210 service->state->id, service->localport,
1211 (queue == &service->bulk_tx) ? 't' : 'r',
1212 queue->process, queue->remote_notify, queue->remove);
1214 queue->remote_notify = queue->process;
1216 if (status == VCHIQ_SUCCESS) {
1217 while (queue->remove != queue->remote_notify) {
1218 struct vchiq_bulk *bulk =
1219 &queue->bulks[BULK_INDEX(queue->remove)];
1221 /* Only generate callbacks for non-dummy bulk
1222 ** requests, and non-terminated services */
1223 if (bulk->data && service->instance) {
1224 if (bulk->actual != VCHIQ_BULK_ACTUAL_ABORTED) {
1225 if (bulk->dir == VCHIQ_BULK_TRANSMIT) {
1226 VCHIQ_SERVICE_STATS_INC(service,
1228 VCHIQ_SERVICE_STATS_ADD(service,
1232 VCHIQ_SERVICE_STATS_INC(service,
1234 VCHIQ_SERVICE_STATS_ADD(service,
1239 VCHIQ_SERVICE_STATS_INC(service,
1240 bulk_aborted_count);
1242 if (bulk->mode == VCHIQ_BULK_MODE_BLOCKING) {
1243 struct bulk_waiter *waiter;
1245 spin_lock(&bulk_waiter_spinlock);
1246 waiter = bulk->userdata;
1248 waiter->actual = bulk->actual;
1249 complete(&waiter->event);
1251 spin_unlock(&bulk_waiter_spinlock);
1252 } else if (bulk->mode ==
1253 VCHIQ_BULK_MODE_CALLBACK) {
1254 VCHIQ_REASON_T reason = (bulk->dir ==
1255 VCHIQ_BULK_TRANSMIT) ?
1257 VCHIQ_BULK_ACTUAL_ABORTED) ?
1258 VCHIQ_BULK_TRANSMIT_ABORTED :
1259 VCHIQ_BULK_TRANSMIT_DONE) :
1261 VCHIQ_BULK_ACTUAL_ABORTED) ?
1262 VCHIQ_BULK_RECEIVE_ABORTED :
1263 VCHIQ_BULK_RECEIVE_DONE);
1264 status = make_service_callback(service,
1265 reason, NULL, bulk->userdata);
1266 if (status == VCHIQ_RETRY)
1272 complete(&service->bulk_remove_event);
1275 status = VCHIQ_SUCCESS;
1278 if (status == VCHIQ_RETRY)
1279 request_poll(service->state, service,
1280 (queue == &service->bulk_tx) ?
1281 VCHIQ_POLL_TXNOTIFY : VCHIQ_POLL_RXNOTIFY);
/* Process the poll requests recorded by request_poll(): for every flagged
 * service, handle REMOVE/TERMINATE close requests and TX/RX bulk
 * notifications, re-queuing the poll if a close cannot complete yet.
 * NOTE(review): this extraction has dropped lines (return type, braces,
 * local declarations such as 'service_flags', several call arguments) -
 * restore from upstream vchiq_core.c before building. */
1286 /* Called by the slot handler thread */
1288 poll_services(struct vchiq_state *state)
1292 for (group = 0; group < BITSET_SIZE(state->unused_service); group++) {
1295 flags = atomic_xchg(&state->poll_services[group], 0);
1296 for (i = 0; flags; i++) {
1297 if (flags & (1 << i)) {
1298 struct vchiq_service *service =
1299 find_service_by_port(state,
1307 atomic_xchg(&service->poll_flags, 0);
1309 (1 << VCHIQ_POLL_REMOVE)) {
1310 vchiq_log_info(vchiq_core_log_level,
1311 "%d: ps - remove %d<->%d",
1312 state->id, service->localport,
1313 service->remoteport);
1315 /* Make it look like a client, because
1316 it must be removed and not left in
1317 the LISTENING state. */
1318 service->public_fourcc =
1319 VCHIQ_FOURCC_INVALID;
1321 if (vchiq_close_service_internal(
1322 service, 0/*!close_recvd*/) !=
1324 request_poll(state, service,
1326 } else if (service_flags &
1327 (1 << VCHIQ_POLL_TERMINATE)) {
1328 vchiq_log_info(vchiq_core_log_level,
1329 "%d: ps - terminate %d<->%d",
1330 state->id, service->localport,
1331 service->remoteport);
1332 if (vchiq_close_service_internal(
1333 service, 0/*!close_recvd*/) !=
1335 request_poll(state, service,
1336 VCHIQ_POLL_TERMINATE);
1338 if (service_flags & (1 << VCHIQ_POLL_TXNOTIFY))
1339 notify_bulks(service,
1342 if (service_flags & (1 << VCHIQ_POLL_RXNOTIFY))
1343 notify_bulks(service,
1346 unlock_service(service);
1352 /* Called with the bulk_mutex held */
/*
 * abort_outstanding_bulks - mark every queued-but-incomplete bulk transfer
 * on the given queue as aborted, fabricating dummy remote/local halves as
 * needed so queue->process can catch up with both insert indices.
 * NOTE(review): extract omits some lines; see the surrounding file for the
 * full loop body.
 */
1354 abort_outstanding_bulks(struct vchiq_service *service,
1355 struct vchiq_bulk_queue *queue)
1357 int is_tx = (queue == &service->bulk_tx);
1359 vchiq_log_trace(vchiq_core_log_level,
1360 "%d: aob:%d %cx - li=%x ri=%x p=%x",
1361 service->state->id, service->localport, is_tx ? 't' : 'r',
1362 queue->local_insert, queue->remote_insert, queue->process);
/* Indices are free-running; the signed differences must be non-negative
 * (process never overtakes either insert index). */
1364 WARN_ON(!((int)(queue->local_insert - queue->process) >= 0));
1365 WARN_ON(!((int)(queue->remote_insert - queue->process) >= 0));
/* Walk forward until process has caught up with both inserts. */
1367 while ((queue->process != queue->local_insert) ||
1368 (queue->process != queue->remote_insert)) {
1369 struct vchiq_bulk *bulk =
1370 &queue->bulks[BULK_INDEX(queue->process)];
1372 if (queue->process == queue->remote_insert) {
1373 /* fabricate a matching dummy bulk */
1374 bulk->remote_data = NULL;
1375 bulk->remote_size = 0;
1376 queue->remote_insert++;
1379 if (queue->process != queue->local_insert) {
1380 vchiq_complete_bulk(bulk);
1382 vchiq_log_info(SRVTRACE_LEVEL(service),
1383 "%s %c%c%c%c d:%d ABORTED - tx len:%d, "
1385 is_tx ? "Send Bulk to" : "Recv Bulk from",
1386 VCHIQ_FOURCC_AS_4CHARS(service->base.fourcc),
1387 service->remoteport,
1391 /* fabricate a matching dummy bulk */
1394 bulk->actual = VCHIQ_BULK_ACTUAL_ABORTED;
1395 bulk->dir = is_tx ? VCHIQ_BULK_TRANSMIT :
1397 queue->local_insert++;
/*
 * parse_open - handle a received OPEN message.
 *
 * Looks for a listening service matching the requested fourcc, checks the
 * version window, acknowledges with OPENACK (sync or async variant), and
 * moves the service to OPEN/OPENSYNC.  If no service matches, or the
 * request is invalid, a CLOSE is sent back.  Returns via bail_not_ready
 * when a queue/callback reports VCHIQ_RETRY, so the message can be
 * re-parsed later.
 * NOTE(review): extract omits some lines (declarations, braces, labels).
 */
1405 parse_open(struct vchiq_state *state, struct vchiq_header *header)
1407 struct vchiq_service *service = NULL;
1409 unsigned int localport, remoteport;
1411 msgid = header->msgid;
1412 size = header->size;
1413 localport = VCHIQ_MSG_DSTPORT(msgid);
1414 remoteport = VCHIQ_MSG_SRCPORT(msgid);
/* The payload is only valid if the message is big enough to carry it. */
1415 if (size >= sizeof(struct vchiq_open_payload)) {
1416 const struct vchiq_open_payload *payload =
1417 (struct vchiq_open_payload *)header->data;
1418 unsigned int fourcc;
1420 fourcc = payload->fourcc;
1421 vchiq_log_info(vchiq_core_log_level,
1422 "%d: prs OPEN@%pK (%d->'%c%c%c%c')",
1423 state->id, header, localport,
1424 VCHIQ_FOURCC_AS_4CHARS(fourcc));
1426 service = get_listening_service(state, fourcc);
1429 /* A matching service exists */
1430 short version = payload->version;
1431 short version_min = payload->version_min;
/* Reject unless the two sides' [version_min, version] ranges overlap. */
1433 if ((service->version < version_min) ||
1434 (version < service->version_min)) {
1435 /* Version mismatch */
1436 vchiq_loud_error_header();
1437 vchiq_loud_error("%d: service %d (%c%c%c%c) "
1438 "version mismatch - local (%d, min %d)"
1439 " vs. remote (%d, min %d)",
1440 state->id, service->localport,
1441 VCHIQ_FOURCC_AS_4CHARS(fourcc),
1442 service->version, service->version_min,
1443 version, version_min);
1444 vchiq_loud_error_footer();
1445 unlock_service(service);
1449 service->peer_version = version;
1451 if (service->srvstate == VCHIQ_SRVSTATE_LISTENING) {
1452 struct vchiq_openack_payload ack_payload = {
/* Older peers predate synchronous mode; presumably the sync
 * flag is dropped here - confirm against the full source. */
1456 if (state->version_common <
1457 VCHIQ_VERSION_SYNCHRONOUS_MODE)
1460 /* Acknowledge the OPEN */
1461 if (service->sync) {
1462 if (queue_message_sync(
1469 memcpy_copy_callback,
1471 sizeof(ack_payload),
1473 goto bail_not_ready;
1475 if (queue_message(state,
1481 memcpy_copy_callback,
1483 sizeof(ack_payload),
1485 goto bail_not_ready;
1488 /* The service is now open */
1489 vchiq_set_service_state(service,
1490 service->sync ? VCHIQ_SRVSTATE_OPENSYNC
1491 : VCHIQ_SRVSTATE_OPEN);
1494 service->remoteport = remoteport;
/* Second word of the OPEN payload carries the client id. */
1495 service->client_id = ((int *)header->data)[1];
1496 if (make_service_callback(service, VCHIQ_SERVICE_OPENED,
1497 NULL, NULL) == VCHIQ_RETRY) {
1498 /* Bail out if not ready */
1499 service->remoteport = VCHIQ_PORT_FREE;
1500 goto bail_not_ready;
1503 /* Success - the message has been dealt with */
1504 unlock_service(service);
1510 /* No available service, or an invalid request - send a CLOSE */
1511 if (queue_message(state, NULL,
1512 VCHIQ_MAKE_MSG(VCHIQ_MSG_CLOSE, 0, VCHIQ_MSG_SRCPORT(msgid)),
1513 NULL, NULL, 0, 0) == VCHIQ_RETRY)
1514 goto bail_not_ready;
1520 unlock_service(service);
1525 /* Called by the slot handler thread */
/*
 * parse_rx_slots - consume messages from the remote's transmit slots.
 *
 * Walks state->rx_pos up to the remote's tx_pos, mapping each slot on
 * first use, dispatching each message by type, and releasing the slot
 * when the read position crosses a slot boundary.  Jumps to
 * bail_not_ready (not visible in this extract) whenever a callback or
 * queued reply returns VCHIQ_RETRY, leaving rx_pos so the message is
 * re-parsed on the next pass.
 * NOTE(review): this extract omits many original lines (case breaks,
 * braces, some arguments); comments describe only what is visible.
 */
1527 parse_rx_slots(struct vchiq_state *state)
1529 struct vchiq_shared_state *remote = state->remote;
1530 struct vchiq_service *service = NULL;
1533 DEBUG_INITIALISE(state->local)
1535 tx_pos = remote->tx_pos;
1537 while (state->rx_pos != tx_pos) {
1538 struct vchiq_header *header;
1541 unsigned int localport, remoteport;
1543 DEBUG_TRACE(PARSE_LINE);
/* First message of a new slot: map the slot and pin it. */
1544 if (!state->rx_data) {
1547 WARN_ON(!((state->rx_pos & VCHIQ_SLOT_MASK) == 0));
1548 rx_index = remote->slot_queue[
1549 SLOT_QUEUE_INDEX_FROM_POS(state->rx_pos) &
1550 VCHIQ_SLOT_QUEUE_MASK];
1551 state->rx_data = (char *)SLOT_DATA_FROM_INDEX(state,
1553 state->rx_info = SLOT_INFO_FROM_INDEX(state, rx_index);
1555 /* Initialise use_count to one, and increment
1556 ** release_count at the end of the slot to avoid
1557 ** releasing the slot prematurely. */
1558 state->rx_info->use_count = 1;
1559 state->rx_info->release_count = 0;
1562 header = (struct vchiq_header *)(state->rx_data +
1563 (state->rx_pos & VCHIQ_SLOT_MASK));
1564 DEBUG_VALUE(PARSE_HEADER, (int)(long)header);
1565 msgid = header->msgid;
1566 DEBUG_VALUE(PARSE_MSGID, msgid);
1567 size = header->size;
1568 type = VCHIQ_MSG_TYPE(msgid);
1569 localport = VCHIQ_MSG_DSTPORT(msgid);
1570 remoteport = VCHIQ_MSG_SRCPORT(msgid);
1572 if (type != VCHIQ_MSG_DATA)
1573 VCHIQ_STATS_INC(state, ctrl_rx_count);
/* Service-addressed message types: resolve the target service first. */
1576 case VCHIQ_MSG_OPENACK:
1577 case VCHIQ_MSG_CLOSE:
1578 case VCHIQ_MSG_DATA:
1579 case VCHIQ_MSG_BULK_RX:
1580 case VCHIQ_MSG_BULK_TX:
1581 case VCHIQ_MSG_BULK_RX_DONE:
1582 case VCHIQ_MSG_BULK_TX_DONE:
1583 service = find_service_by_port(state, localport);
1585 ((service->remoteport != remoteport) &&
1586 (service->remoteport != VCHIQ_PORT_FREE))) &&
1588 (type == VCHIQ_MSG_CLOSE)) {
1589 /* This could be a CLOSE from a client which
1590 hadn't yet received the OPENACK - look for
1591 the connected service */
1593 unlock_service(service);
1594 service = get_connected_service(state,
1597 vchiq_log_warning(vchiq_core_log_level,
1598 "%d: prs %s@%pK (%d->%d) - found connected service %d",
1599 state->id, msg_type_str(type),
1600 header, remoteport, localport,
1601 service->localport);
1605 vchiq_log_error(vchiq_core_log_level,
1606 "%d: prs %s@%pK (%d->%d) - invalid/closed service %d",
1607 state->id, msg_type_str(type),
1608 header, remoteport, localport,
1617 if (SRVTRACE_ENABLED(service, VCHIQ_LOG_INFO)) {
1620 svc_fourcc = service
1621 ? service->base.fourcc
1622 : VCHIQ_MAKE_FOURCC('?', '?', '?', '?');
1623 vchiq_log_info(SRVTRACE_LEVEL(service),
1624 "Rcvd Msg %s(%u) from %c%c%c%c s:%d d:%d "
1626 msg_type_str(type), type,
1627 VCHIQ_FOURCC_AS_4CHARS(svc_fourcc),
1628 remoteport, localport, size);
1630 vchiq_log_dump_mem("Rcvd", 0, header->data,
/* Sanity check: the whole stride must fit inside this slot. */
1634 if (((unsigned long)header & VCHIQ_SLOT_MASK) +
1635 calc_stride(size) > VCHIQ_SLOT_SIZE) {
1636 vchiq_log_error(vchiq_core_log_level,
1637 "header %pK (msgid %x) - size %x too big for slot",
1638 header, (unsigned int)msgid,
1639 (unsigned int)size);
1640 WARN(1, "oversized for slot\n");
1644 case VCHIQ_MSG_OPEN:
1645 WARN_ON(!(VCHIQ_MSG_DSTPORT(msgid) == 0));
1646 if (!parse_open(state, header))
1647 goto bail_not_ready;
1649 case VCHIQ_MSG_OPENACK:
1650 if (size >= sizeof(struct vchiq_openack_payload)) {
1651 const struct vchiq_openack_payload *payload =
1652 (struct vchiq_openack_payload *)
1654 service->peer_version = payload->version;
1656 vchiq_log_info(vchiq_core_log_level,
1657 "%d: prs OPENACK@%pK,%x (%d->%d) v:%d",
1658 state->id, header, size, remoteport, localport,
1659 service->peer_version);
1660 if (service->srvstate ==
1661 VCHIQ_SRVSTATE_OPENING) {
1662 service->remoteport = remoteport;
1663 vchiq_set_service_state(service,
1664 VCHIQ_SRVSTATE_OPEN);
/* Wake the opener blocked in vchiq_open_service_internal(). */
1665 complete(&service->remove_event);
1667 vchiq_log_error(vchiq_core_log_level,
1668 "OPENACK received in state %s",
1669 srvstate_names[service->srvstate]);
1671 case VCHIQ_MSG_CLOSE:
1672 WARN_ON(size != 0); /* There should be no data */
1674 vchiq_log_info(vchiq_core_log_level,
1675 "%d: prs CLOSE@%pK (%d->%d)",
1676 state->id, header, remoteport, localport);
1678 mark_service_closing_internal(service, 1);
1680 if (vchiq_close_service_internal(service,
1681 1/*close_recvd*/) == VCHIQ_RETRY)
1682 goto bail_not_ready;
1684 vchiq_log_info(vchiq_core_log_level,
1685 "Close Service %c%c%c%c s:%u d:%d",
1686 VCHIQ_FOURCC_AS_4CHARS(service->base.fourcc),
1688 service->remoteport);
1690 case VCHIQ_MSG_DATA:
1691 vchiq_log_info(vchiq_core_log_level,
1692 "%d: prs DATA@%pK,%x (%d->%d)",
1693 state->id, header, size, remoteport, localport);
1695 if ((service->remoteport == remoteport)
1696 && (service->srvstate ==
1697 VCHIQ_SRVSTATE_OPEN)) {
/* Hand the message to the service; the slot stays claimed
 * until the service releases the message. */
1698 header->msgid = msgid | VCHIQ_MSGID_CLAIMED;
1699 claim_slot(state->rx_info);
1700 DEBUG_TRACE(PARSE_LINE);
1701 if (make_service_callback(service,
1702 VCHIQ_MESSAGE_AVAILABLE, header,
1703 NULL) == VCHIQ_RETRY) {
1704 DEBUG_TRACE(PARSE_LINE);
1705 goto bail_not_ready;
1707 VCHIQ_SERVICE_STATS_INC(service, ctrl_rx_count);
1708 VCHIQ_SERVICE_STATS_ADD(service, ctrl_rx_bytes,
1711 VCHIQ_STATS_INC(state, error_count);
1714 case VCHIQ_MSG_CONNECT:
1715 vchiq_log_info(vchiq_core_log_level,
1716 "%d: prs CONNECT@%pK", state->id, header);
/* Adopt the common protocol version published in slot zero. */
1717 state->version_common = ((struct vchiq_slot_zero *)
1718 state->slot_data)->version;
1719 complete(&state->connect);
1721 case VCHIQ_MSG_BULK_RX:
1722 case VCHIQ_MSG_BULK_TX:
1724 * We should never receive a bulk request from the
1725 * other side since we're not setup to perform as the
1730 case VCHIQ_MSG_BULK_RX_DONE:
1731 case VCHIQ_MSG_BULK_TX_DONE:
1732 if ((service->remoteport == remoteport)
1733 && (service->srvstate !=
1734 VCHIQ_SRVSTATE_FREE)) {
1735 struct vchiq_bulk_queue *queue;
1736 struct vchiq_bulk *bulk;
1738 queue = (type == VCHIQ_MSG_BULK_RX_DONE) ?
1739 &service->bulk_rx : &service->bulk_tx;
1741 DEBUG_TRACE(PARSE_LINE);
1742 if (mutex_lock_killable(&service->bulk_mutex)) {
1743 DEBUG_TRACE(PARSE_LINE);
1744 goto bail_not_ready;
/* A DONE with nothing outstanding is a protocol error. */
1746 if ((int)(queue->remote_insert -
1747 queue->local_insert) >= 0) {
1748 vchiq_log_error(vchiq_core_log_level,
1749 "%d: prs %s@%pK (%d->%d) "
1750 "unexpected (ri=%d,li=%d)",
1751 state->id, msg_type_str(type),
1752 header, remoteport, localport,
1753 queue->remote_insert,
1754 queue->local_insert);
1755 mutex_unlock(&service->bulk_mutex);
1758 if (queue->process != queue->remote_insert) {
1759 pr_err("%s: p %x != ri %x\n",
1762 queue->remote_insert);
1763 mutex_unlock(&service->bulk_mutex);
1764 goto bail_not_ready;
/* First word of the payload is the actual byte count. */
1767 bulk = &queue->bulks[
1768 BULK_INDEX(queue->remote_insert)];
1769 bulk->actual = *(int *)header->data;
1770 queue->remote_insert++;
1772 vchiq_log_info(vchiq_core_log_level,
1773 "%d: prs %s@%pK (%d->%d) %x@%pK",
1774 state->id, msg_type_str(type),
1775 header, remoteport, localport,
1776 bulk->actual, bulk->data);
1778 vchiq_log_trace(vchiq_core_log_level,
1779 "%d: prs:%d %cx li=%x ri=%x p=%x",
1780 state->id, localport,
1781 (type == VCHIQ_MSG_BULK_RX_DONE) ?
1783 queue->local_insert,
1784 queue->remote_insert, queue->process);
1786 DEBUG_TRACE(PARSE_LINE);
1787 WARN_ON(queue->process == queue->local_insert);
1788 vchiq_complete_bulk(bulk);
1790 mutex_unlock(&service->bulk_mutex);
1791 DEBUG_TRACE(PARSE_LINE);
1792 notify_bulks(service, queue, 1/*retry_poll*/);
1793 DEBUG_TRACE(PARSE_LINE);
1796 case VCHIQ_MSG_PADDING:
1797 vchiq_log_trace(vchiq_core_log_level,
1798 "%d: prs PADDING@%pK,%x",
1799 state->id, header, size);
1801 case VCHIQ_MSG_PAUSE:
1802 /* If initiated, signal the application thread */
1803 vchiq_log_trace(vchiq_core_log_level,
1804 "%d: prs PAUSE@%pK,%x",
1805 state->id, header, size);
1806 if (state->conn_state == VCHIQ_CONNSTATE_PAUSED) {
1807 vchiq_log_error(vchiq_core_log_level,
1808 "%d: PAUSE received in state PAUSED",
1812 if (state->conn_state != VCHIQ_CONNSTATE_PAUSE_SENT) {
1813 /* Send a PAUSE in response */
1814 if (queue_message(state, NULL,
1815 VCHIQ_MAKE_MSG(VCHIQ_MSG_PAUSE, 0, 0),
1816 NULL, NULL, 0, QMFLAGS_NO_MUTEX_UNLOCK)
1818 goto bail_not_ready;
1820 /* At this point slot_mutex is held */
1821 vchiq_set_conn_state(state, VCHIQ_CONNSTATE_PAUSED);
1822 vchiq_platform_paused(state);
1824 case VCHIQ_MSG_RESUME:
1825 vchiq_log_trace(vchiq_core_log_level,
1826 "%d: prs RESUME@%pK,%x",
1827 state->id, header, size);
1828 /* Release the slot mutex */
1829 mutex_unlock(&state->slot_mutex);
1830 vchiq_set_conn_state(state, VCHIQ_CONNSTATE_CONNECTED);
1831 vchiq_platform_resumed(state);
1834 case VCHIQ_MSG_REMOTE_USE:
1835 vchiq_on_remote_use(state);
1837 case VCHIQ_MSG_REMOTE_RELEASE:
1838 vchiq_on_remote_release(state);
1840 case VCHIQ_MSG_REMOTE_USE_ACTIVE:
1841 vchiq_on_remote_use_active(state);
1845 vchiq_log_error(vchiq_core_log_level,
1846 "%d: prs invalid msgid %x@%pK,%x",
1847 state->id, msgid, header, size);
1848 WARN(1, "invalid message\n");
1854 unlock_service(service);
/* Advance past this message (stride = header + padded payload). */
1858 state->rx_pos += calc_stride(size);
1860 DEBUG_TRACE(PARSE_LINE);
1861 /* Perform some housekeeping when the end of the slot is
1863 if ((state->rx_pos & VCHIQ_SLOT_MASK) == 0) {
1864 /* Remove the extra reference count. */
1865 release_slot(state, state->rx_info, NULL, NULL);
1866 state->rx_data = NULL;
1872 unlock_service(service);
1875 /* Called by the slot handler thread */
/*
 * slot_handler_func - main loop of the vchiq slot handler kthread.
 *
 * Waits on the trigger event, then handles rare conditions (service
 * polling, pause/resume state transitions, suspend checks) before
 * parsing any newly-arrived rx slots.
 * NOTE(review): extract omits some lines (loop construct, breaks).
 */
1877 slot_handler_func(void *v)
1879 struct vchiq_state *state = (struct vchiq_state *)v;
1880 struct vchiq_shared_state *local = state->local;
1882 DEBUG_INITIALISE(local)
1885 DEBUG_COUNT(SLOT_HANDLER_COUNT);
1886 DEBUG_TRACE(SLOT_HANDLER_LINE);
/* Sleep until the remote (or a local poll request) signals us. */
1887 remote_event_wait(&state->trigger_event, &local->trigger);
1891 DEBUG_TRACE(SLOT_HANDLER_LINE);
1892 if (state->poll_needed) {
1893 /* Check if we need to suspend - may change our
1895 vchiq_platform_check_suspend(state);
1897 state->poll_needed = 0;
1899 /* Handle service polling and other rare conditions here
1900 ** out of the mainline code */
1901 switch (state->conn_state) {
1902 case VCHIQ_CONNSTATE_CONNECTED:
1903 /* Poll the services as requested */
1904 poll_services(state);
1907 case VCHIQ_CONNSTATE_PAUSING:
1908 if (queue_message(state, NULL,
1909 VCHIQ_MAKE_MSG(VCHIQ_MSG_PAUSE, 0, 0),
1911 QMFLAGS_NO_MUTEX_UNLOCK)
1913 vchiq_set_conn_state(state,
1914 VCHIQ_CONNSTATE_PAUSE_SENT);
/* PAUSE could not be sent now; retry on the next wakeup. */
1917 state->poll_needed = 1;
1921 case VCHIQ_CONNSTATE_PAUSED:
1922 vchiq_platform_resume(state);
1925 case VCHIQ_CONNSTATE_RESUMING:
1926 if (queue_message(state, NULL,
1927 VCHIQ_MAKE_MSG(VCHIQ_MSG_RESUME, 0, 0),
1928 NULL, NULL, 0, QMFLAGS_NO_MUTEX_LOCK)
1930 vchiq_set_conn_state(state,
1931 VCHIQ_CONNSTATE_CONNECTED);
1932 vchiq_platform_resumed(state);
1934 /* This should really be impossible,
1935 ** since the PAUSE should have flushed
1936 ** through outstanding messages. */
1937 vchiq_log_error(vchiq_core_log_level,
1938 "Failed to send RESUME "
1943 case VCHIQ_CONNSTATE_PAUSE_TIMEOUT:
1944 case VCHIQ_CONNSTATE_RESUME_TIMEOUT:
1945 vchiq_platform_handle_timeout(state);
1953 DEBUG_TRACE(SLOT_HANDLER_LINE);
1954 parse_rx_slots(state);
1959 /* Called by the recycle thread */
/*
 * recycle_func - main loop of the vchiq slot-recycling kthread.
 * Allocates a scratch service bitmap, then repeatedly waits for the
 * recycle event and processes the free-slot queue.
 * NOTE(review): extract omits the allocation-failure check and loop
 * construct around remote_event_wait/process_free_queue.
 */
1961 recycle_func(void *v)
1963 struct vchiq_state *state = (struct vchiq_state *)v;
1964 struct vchiq_shared_state *local = state->local;
1968 length = sizeof(*found) * BITSET_SIZE(VCHIQ_MAX_SERVICES);
1970 found = kmalloc_array(BITSET_SIZE(VCHIQ_MAX_SERVICES), sizeof(*found),
1976 remote_event_wait(&state->recycle_event, &local->recycle);
1978 process_free_queue(state, found, length);
1983 /* Called by the sync thread */
/*
 * sync_func - main loop of the vchiq synchronous-message kthread.
 *
 * Waits for the sync trigger, then parses the single message in the
 * remote's dedicated sync slot: OPENACK completes a pending synchronous
 * open; DATA is delivered to a service in OPENSYNC state.  Unexpected
 * message ids are logged and released.
 * NOTE(review): the function's signature line is not present in this
 * extract; comments describe only the visible body.
 */
1987 struct vchiq_state *state = (struct vchiq_state *)v;
1988 struct vchiq_shared_state *local = state->local;
/* The sync channel always uses the one fixed slot indexed by slot_sync. */
1989 struct vchiq_header *header =
1990 (struct vchiq_header *)SLOT_DATA_FROM_INDEX(state,
1991 state->remote->slot_sync);
1994 struct vchiq_service *service;
1997 unsigned int localport, remoteport;
1999 remote_event_wait(&state->sync_trigger_event, &local->sync_trigger);
2003 msgid = header->msgid;
2004 size = header->size;
2005 type = VCHIQ_MSG_TYPE(msgid);
2006 localport = VCHIQ_MSG_DSTPORT(msgid);
2007 remoteport = VCHIQ_MSG_SRCPORT(msgid);
2009 service = find_service_by_port(state, localport);
2012 vchiq_log_error(vchiq_sync_log_level,
2013 "%d: sf %s@%pK (%d->%d) - invalid/closed service %d",
2014 state->id, msg_type_str(type),
2015 header, remoteport, localport, localport);
/* No service to deliver to - hand the slot straight back. */
2016 release_message_sync(state, header);
2020 if (vchiq_sync_log_level >= VCHIQ_LOG_TRACE) {
2023 svc_fourcc = service
2024 ? service->base.fourcc
2025 : VCHIQ_MAKE_FOURCC('?', '?', '?', '?');
2026 vchiq_log_trace(vchiq_sync_log_level,
2027 "Rcvd Msg %s from %c%c%c%c s:%d d:%d len:%d",
2029 VCHIQ_FOURCC_AS_4CHARS(svc_fourcc),
2030 remoteport, localport, size);
2032 vchiq_log_dump_mem("Rcvd", 0, header->data,
2037 case VCHIQ_MSG_OPENACK:
2038 if (size >= sizeof(struct vchiq_openack_payload)) {
2039 const struct vchiq_openack_payload *payload =
2040 (struct vchiq_openack_payload *)
2042 service->peer_version = payload->version;
2044 vchiq_log_info(vchiq_sync_log_level,
2045 "%d: sf OPENACK@%pK,%x (%d->%d) v:%d",
2046 state->id, header, size, remoteport, localport,
2047 service->peer_version);
2048 if (service->srvstate == VCHIQ_SRVSTATE_OPENING) {
2049 service->remoteport = remoteport;
2050 vchiq_set_service_state(service,
2051 VCHIQ_SRVSTATE_OPENSYNC);
/* Wake the thread blocked in vchiq_open_service_internal(). */
2053 complete(&service->remove_event);
2055 release_message_sync(state, header);
2058 case VCHIQ_MSG_DATA:
2059 vchiq_log_trace(vchiq_sync_log_level,
2060 "%d: sf DATA@%pK,%x (%d->%d)",
2061 state->id, header, size, remoteport, localport);
2063 if ((service->remoteport == remoteport) &&
2064 (service->srvstate ==
2065 VCHIQ_SRVSTATE_OPENSYNC)) {
2066 if (make_service_callback(service,
2067 VCHIQ_MESSAGE_AVAILABLE, header,
2068 NULL) == VCHIQ_RETRY)
2069 vchiq_log_error(vchiq_sync_log_level,
2070 "synchronous callback to "
2071 "service %d returns "
2078 vchiq_log_error(vchiq_sync_log_level,
2079 "%d: sf unexpected msgid %x@%pK,%x",
2080 state->id, msgid, header, size);
2081 release_message_sync(state, header);
2085 unlock_service(service);
/*
 * init_bulk_queue - reset a bulk queue's free-running indices to zero.
 * NOTE(review): extract omits some index resets present in the full file.
 */
2092 init_bulk_queue(struct vchiq_bulk_queue *queue)
2094 queue->local_insert = 0;
2095 queue->remote_insert = 0;
2097 queue->remote_notify = 0;
/* Map a connection-state enum value to its printable name. */
2102 get_conn_state_name(VCHIQ_CONNSTATE_T conn_state)
2104 return conn_state_names[conn_state];
/*
 * vchiq_init_slots - lay out the shared-memory region.
 *
 * Aligns mem_base up to a slot boundary, places the slot_zero control
 * structure there, and splits the remaining slots evenly between the
 * master and slave sides (one sync slot plus data slots each).
 * Returns NULL (not visible in this extract - confirm) when fewer than
 * 4 data slots would remain.
 */
2107 struct vchiq_slot_zero *
2108 vchiq_init_slots(void *mem_base, int mem_size)
2111 (int)((VCHIQ_SLOT_SIZE - (long)mem_base) & VCHIQ_SLOT_MASK);
2112 struct vchiq_slot_zero *slot_zero =
2113 (struct vchiq_slot_zero *)((char *)mem_base + mem_align);
2114 int num_slots = (mem_size - mem_align)/VCHIQ_SLOT_SIZE;
2115 int first_data_slot = VCHIQ_SLOT_ZERO_SLOTS;
2117 /* Ensure there is enough memory to run an absolutely minimum system */
2118 num_slots -= first_data_slot;
2120 if (num_slots < 4) {
2121 vchiq_log_error(vchiq_core_log_level,
2122 "%s - insufficient memory %x bytes",
2123 __func__, mem_size);
2127 memset(slot_zero, 0, sizeof(struct vchiq_slot_zero));
2129 slot_zero->magic = VCHIQ_MAGIC;
2130 slot_zero->version = VCHIQ_VERSION;
2131 slot_zero->version_min = VCHIQ_VERSION_MIN;
2132 slot_zero->slot_zero_size = sizeof(struct vchiq_slot_zero);
2133 slot_zero->slot_size = VCHIQ_SLOT_SIZE;
2134 slot_zero->max_slots = VCHIQ_MAX_SLOTS;
2135 slot_zero->max_slots_per_side = VCHIQ_MAX_SLOTS_PER_SIDE;
/* Each side gets one dedicated sync slot followed by its data slots. */
2137 slot_zero->master.slot_sync = first_data_slot;
2138 slot_zero->master.slot_first = first_data_slot + 1;
2139 slot_zero->master.slot_last = first_data_slot + (num_slots/2) - 1;
2140 slot_zero->slave.slot_sync = first_data_slot + (num_slots/2);
2141 slot_zero->slave.slot_first = first_data_slot + (num_slots/2) + 1;
2142 slot_zero->slave.slot_last = first_data_slot + num_slots - 1;
/*
 * vchiq_init_state - initialise the local vchiq state against a prepared
 * slot_zero region and start the three worker kthreads (slot handler,
 * recycler, sync handler).
 *
 * This side acts as the slave: local = slot_zero->slave,
 * remote = slot_zero->master.  On success the state is registered in
 * vchiq_states[0] and local->initialised is set to signal readiness to
 * the remote.  On thread-creation failure, previously created threads
 * are stopped via the goto-cleanup labels at the end.
 * NOTE(review): extract omits some lines (error returns, loop braces).
 */
2148 vchiq_init_state(struct vchiq_state *state, struct vchiq_slot_zero *slot_zero)
2150 struct vchiq_shared_state *local;
2151 struct vchiq_shared_state *remote;
2152 VCHIQ_STATUS_T status;
2153 char threadname[16];
2156 vchiq_log_warning(vchiq_core_log_level,
2157 "%s: slot_zero = %pK", __func__, slot_zero);
/* Only a single state instance is supported. */
2159 if (vchiq_states[0]) {
2160 pr_err("%s: VCHIQ state already initialized\n", __func__);
2164 local = &slot_zero->slave;
2165 remote = &slot_zero->master;
2167 if (local->initialised) {
2168 vchiq_loud_error_header();
2169 if (remote->initialised)
2170 vchiq_loud_error("local state has already been "
2173 vchiq_loud_error("master/slave mismatch two slaves");
2174 vchiq_loud_error_footer();
2178 memset(state, 0, sizeof(struct vchiq_state));
2181 initialize shared state pointers
2184 state->local = local;
2185 state->remote = remote;
2186 state->slot_data = (struct vchiq_slot *)slot_zero;
2189 initialize events and mutexes
2192 init_completion(&state->connect);
2193 mutex_init(&state->mutex);
2194 mutex_init(&state->slot_mutex);
2195 mutex_init(&state->recycle_mutex);
2196 mutex_init(&state->sync_mutex);
2197 mutex_init(&state->bulk_transfer_mutex);
2199 init_completion(&state->slot_available_event);
2200 init_completion(&state->slot_remove_event);
2201 init_completion(&state->data_quota_event);
2203 state->slot_queue_available = 0;
2205 for (i = 0; i < VCHIQ_MAX_SERVICES; i++) {
2206 struct vchiq_service_quota *service_quota =
2207 &state->service_quotas[i];
2208 init_completion(&service_quota->quota_event);
/* Seed the local slot queue with every slot this side owns. */
2211 for (i = local->slot_first; i <= local->slot_last; i++) {
2212 local->slot_queue[state->slot_queue_available++] = i;
2213 complete(&state->slot_available_event);
2216 state->default_slot_quota = state->slot_queue_available/2;
2217 state->default_message_quota =
2218 min((unsigned short)(state->default_slot_quota * 256),
2219 (unsigned short)~0);
2221 state->previous_data_index = -1;
2222 state->data_use_count = 0;
2223 state->data_quota = state->slot_queue_available - 1;
2225 remote_event_create(&state->trigger_event, &local->trigger);
2227 remote_event_create(&state->recycle_event, &local->recycle);
2228 local->slot_queue_recycle = state->slot_queue_available;
2229 remote_event_create(&state->sync_trigger_event, &local->sync_trigger);
2230 remote_event_create(&state->sync_release_event, &local->sync_release);
2232 /* At start-of-day, the slot is empty and available */
2233 ((struct vchiq_header *)
2234 SLOT_DATA_FROM_INDEX(state, local->slot_sync))->msgid =
2235 VCHIQ_MSGID_PADDING;
2236 remote_event_signal_local(&state->sync_release_event, &local->sync_release);
2238 local->debug[DEBUG_ENTRIES] = DEBUG_MAX;
2240 status = vchiq_platform_init_state(state);
2243 bring up slot handler thread
2245 snprintf(threadname, sizeof(threadname), "vchiq-slot/%d", state->id);
2246 state->slot_handler_thread = kthread_create(&slot_handler_func,
2250 if (IS_ERR(state->slot_handler_thread)) {
2251 vchiq_loud_error_header();
2252 vchiq_loud_error("couldn't create thread %s", threadname);
2253 vchiq_loud_error_footer();
2256 set_user_nice(state->slot_handler_thread, -19);
2258 snprintf(threadname, sizeof(threadname), "vchiq-recy/%d", state->id);
2259 state->recycle_thread = kthread_create(&recycle_func,
2262 if (IS_ERR(state->recycle_thread)) {
2263 vchiq_loud_error_header();
2264 vchiq_loud_error("couldn't create thread %s", threadname);
2265 vchiq_loud_error_footer();
2266 goto fail_free_handler_thread;
2268 set_user_nice(state->recycle_thread, -19);
2270 snprintf(threadname, sizeof(threadname), "vchiq-sync/%d", state->id);
2271 state->sync_thread = kthread_create(&sync_func,
2274 if (IS_ERR(state->sync_thread)) {
2275 vchiq_loud_error_header();
2276 vchiq_loud_error("couldn't create thread %s", threadname);
2277 vchiq_loud_error_footer();
2278 goto fail_free_recycle_thread;
2280 set_user_nice(state->sync_thread, -20);
/* All threads created successfully - let them run. */
2282 wake_up_process(state->slot_handler_thread);
2283 wake_up_process(state->recycle_thread);
2284 wake_up_process(state->sync_thread);
2286 vchiq_states[0] = state;
2288 /* Indicate readiness to the other side */
2289 local->initialised = 1;
2293 fail_free_recycle_thread:
2294 kthread_stop(state->recycle_thread);
2295 fail_free_handler_thread:
2296 kthread_stop(state->slot_handler_thread);
2301 /* Called from application thread when a client or server service is created. */
/*
 * vchiq_add_service_internal - allocate and register a new service.
 *
 * Allocates the service structure, initialises its fields/queues/events,
 * then (under state->mutex) finds a free slot in state->services[]:
 * clients (OPENING) take the first free slot, servers scan from the top
 * and are rejected if another server already uses the same public fourcc
 * with a different instance or callback.  The service is returned with a
 * ref_count of 1; the caller owns that reference.
 * NOTE(review): extract omits some lines (failure returns, braces).
 */
2302 struct vchiq_service *
2303 vchiq_add_service_internal(struct vchiq_state *state,
2304 const struct vchiq_service_params *params,
2305 int srvstate, VCHIQ_INSTANCE_T instance,
2306 VCHIQ_USERDATA_TERM_T userdata_term)
2308 struct vchiq_service *service;
2309 struct vchiq_service **pservice = NULL;
2310 struct vchiq_service_quota *service_quota;
2313 service = kmalloc(sizeof(*service), GFP_KERNEL);
2317 service->base.fourcc = params->fourcc;
2318 service->base.callback = params->callback;
2319 service->base.userdata = params->userdata;
2320 service->handle = VCHIQ_SERVICE_HANDLE_INVALID;
2321 service->ref_count = 1;
2322 service->srvstate = VCHIQ_SRVSTATE_FREE;
2323 service->userdata_term = userdata_term;
2324 service->localport = VCHIQ_PORT_FREE;
2325 service->remoteport = VCHIQ_PORT_FREE;
/* Clients keep their fourcc private; servers advertise it. */
2327 service->public_fourcc = (srvstate == VCHIQ_SRVSTATE_OPENING) ?
2328 VCHIQ_FOURCC_INVALID : params->fourcc;
2329 service->client_id = 0;
2330 service->auto_close = 1;
2332 service->closing = 0;
2334 atomic_set(&service->poll_flags, 0);
2335 service->version = params->version;
2336 service->version_min = params->version_min;
2337 service->state = state;
2338 service->instance = instance;
2339 service->service_use_count = 0;
2340 init_bulk_queue(&service->bulk_tx);
2341 init_bulk_queue(&service->bulk_rx);
2342 init_completion(&service->remove_event);
2343 init_completion(&service->bulk_remove_event);
2344 mutex_init(&service->bulk_mutex);
2345 memset(&service->stats, 0, sizeof(service->stats));
2347 /* Although it is perfectly possible to use service_spinlock
2348 ** to protect the creation of services, it is overkill as it
2349 ** disables interrupts while the array is searched.
2350 ** The only danger is of another thread trying to create a
2351 ** service - service deletion is safe.
2352 ** Therefore it is preferable to use state->mutex which,
2353 ** although slower to claim, doesn't block interrupts while
2357 mutex_lock(&state->mutex);
2359 /* Prepare to use a previously unused service */
2360 if (state->unused_service < VCHIQ_MAX_SERVICES)
2361 pservice = &state->services[state->unused_service];
2363 if (srvstate == VCHIQ_SRVSTATE_OPENING) {
2364 for (i = 0; i < state->unused_service; i++) {
2365 struct vchiq_service *srv = state->services[i];
2368 pservice = &state->services[i];
/* Server path: scan downwards so servers cluster at high ports. */
2373 for (i = (state->unused_service - 1); i >= 0; i--) {
2374 struct vchiq_service *srv = state->services[i];
2377 pservice = &state->services[i];
2378 else if ((srv->public_fourcc == params->fourcc)
2379 && ((srv->instance != instance) ||
2380 (srv->base.callback !=
2381 params->callback))) {
2382 /* There is another server using this
2383 ** fourcc which doesn't match. */
2391 service->localport = (pservice - state->services);
/* Handles encode (sequence | state id | port) so stale handles can
 * be detected; handle_seq advances by MAX_STATES*MAX_SERVICES. */
2393 handle_seq = VCHIQ_MAX_STATES *
2395 service->handle = handle_seq |
2396 (state->id * VCHIQ_MAX_SERVICES) |
2398 handle_seq += VCHIQ_MAX_STATES * VCHIQ_MAX_SERVICES;
2399 *pservice = service;
2400 if (pservice == &state->services[state->unused_service])
2401 state->unused_service++;
2404 mutex_unlock(&state->mutex);
2411 service_quota = &state->service_quotas[service->localport];
2412 service_quota->slot_quota = state->default_slot_quota;
2413 service_quota->message_quota = state->default_message_quota;
2414 if (service_quota->slot_use_count == 0)
2415 service_quota->previous_tx_index =
2416 SLOT_QUEUE_INDEX_FROM_POS(state->local_tx_pos)
2419 /* Bring this service online */
2420 vchiq_set_service_state(service, srvstate);
2422 vchiq_log_info(vchiq_core_msg_log_level,
2423 "%s Service %c%c%c%c SrcPort:%d",
2424 (srvstate == VCHIQ_SRVSTATE_OPENING)
2426 VCHIQ_FOURCC_AS_4CHARS(params->fourcc),
2427 service->localport);
2429 /* Don't unlock the service - leave it with a ref_count of 1. */
/*
 * vchiq_open_service_internal - send an OPEN request for a client service
 * and block until the peer answers.
 *
 * Queues an OPEN message carrying fourcc/version info, then waits on
 * service->remove_event, which parse_rx_slots()/sync_func() complete on
 * OPENACK (or on close).  Returns VCHIQ_RETRY if the wait is interrupted,
 * VCHIQ_ERROR if the service did not end up OPEN/OPENSYNC.
 */
2435 vchiq_open_service_internal(struct vchiq_service *service, int client_id)
2437 struct vchiq_open_payload payload = {
2438 service->base.fourcc,
2441 service->version_min
2443 VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
2445 service->client_id = client_id;
2446 vchiq_use_service_internal(service);
2447 status = queue_message(service->state,
2449 VCHIQ_MAKE_MSG(VCHIQ_MSG_OPEN,
2452 memcpy_copy_callback,
2455 QMFLAGS_IS_BLOCKING);
2456 if (status == VCHIQ_SUCCESS) {
2457 /* Wait for the ACK/NAK */
2458 if (wait_for_completion_killable(&service->remove_event)) {
/* Interrupted by a fatal signal - undo the use count. */
2459 status = VCHIQ_RETRY;
2460 vchiq_release_service_internal(service);
2461 } else if ((service->srvstate != VCHIQ_SRVSTATE_OPEN) &&
2462 (service->srvstate != VCHIQ_SRVSTATE_OPENSYNC)) {
2463 if (service->srvstate != VCHIQ_SRVSTATE_CLOSEWAIT)
2464 vchiq_log_error(vchiq_core_log_level,
2465 "%d: osi - srvstate = %s (ref %d)",
2467 srvstate_names[service->srvstate],
2468 service->ref_count);
2469 status = VCHIQ_ERROR;
2470 VCHIQ_SERVICE_STATS_INC(service, error_count);
2471 vchiq_release_service_internal(service);
/*
 * release_service_messages - drop every claimed message addressed to this
 * service, so its slots can be recycled when the service goes away.
 *
 * Handles the sync slot first (if the service is synchronous), then scans
 * each remote data slot whose use/release counts differ, walking headers
 * up to the slot end (or the current read position for the slot still
 * being parsed) and releasing any CLAIMED message for this local port.
 * NOTE(review): extract omits some lines (braces, inner loop header).
 */
2478 release_service_messages(struct vchiq_service *service)
2480 struct vchiq_state *state = service->state;
2481 int slot_last = state->remote->slot_last;
2484 /* Release any claimed messages aimed at this service */
2486 if (service->sync) {
2487 struct vchiq_header *header =
2488 (struct vchiq_header *)SLOT_DATA_FROM_INDEX(state,
2489 state->remote->slot_sync);
2490 if (VCHIQ_MSG_DSTPORT(header->msgid) == service->localport)
2491 release_message_sync(state, header);
2496 for (i = state->remote->slot_first; i <= slot_last; i++) {
2497 struct vchiq_slot_info *slot_info =
2498 SLOT_INFO_FROM_INDEX(state, i);
/* Only slots with outstanding claims need scanning. */
2499 if (slot_info->release_count != slot_info->use_count) {
2501 (char *)SLOT_DATA_FROM_INDEX(state, i);
2502 unsigned int pos, end;
2504 end = VCHIQ_SLOT_SIZE;
2505 if (data == state->rx_data)
2506 /* This buffer is still being read from - stop
2507 ** at the current read position */
2508 end = state->rx_pos & VCHIQ_SLOT_MASK;
2513 struct vchiq_header *header =
2514 (struct vchiq_header *)(data + pos);
2515 int msgid = header->msgid;
2516 int port = VCHIQ_MSG_DSTPORT(msgid);
2518 if ((port == service->localport) &&
2519 (msgid & VCHIQ_MSGID_CLAIMED)) {
2520 vchiq_log_info(vchiq_core_log_level,
2521 " fsi - hdr %pK", header);
2522 release_slot(state, slot_info, header,
2525 pos += calc_stride(header->size);
/* A stride past the slot end means the slot contents are
 * corrupt - log and warn rather than run off the buffer. */
2526 if (pos > VCHIQ_SLOT_SIZE) {
2527 vchiq_log_error(vchiq_core_log_level,
2528 "fsi - pos %x: header %pK, msgid %x, header->msgid %x, header->size %x",
2530 header->msgid, header->size);
2531 WARN(1, "invalid slot position\n");
/*
 * do_abort_bulks - abort all outstanding bulk transfers on both queues of
 * a service (under bulk_mutex), then run the completion notifications.
 * Returns non-zero on success, i.e. when both notify_bulks() calls
 * returned VCHIQ_SUCCESS.
 * NOTE(review): extract omits the early-return on a failed
 * mutex_lock_killable().
 */
2539 do_abort_bulks(struct vchiq_service *service)
2541 VCHIQ_STATUS_T status;
2543 /* Abort any outstanding bulk transfers */
2544 if (mutex_lock_killable(&service->bulk_mutex) != 0)
2546 abort_outstanding_bulks(service, &service->bulk_tx);
2547 abort_outstanding_bulks(service, &service->bulk_rx);
2548 mutex_unlock(&service->bulk_mutex);
/* Notifications must run without the bulk mutex held. */
2550 status = notify_bulks(service, &service->bulk_tx, 0/*!retry_poll*/);
2551 if (status == VCHIQ_SUCCESS)
2552 status = notify_bulks(service, &service->bulk_rx,
2554 return (status == VCHIQ_SUCCESS);
/*
 * close_service_complete - final step of closing a service.
 *
 * Picks the post-close state (auto-close servers go back to LISTENING,
 * other servers to CLOSEWAIT, clients to CLOSED), delivers the
 * VCHIQ_SERVICE_CLOSED callback, and either frees the service or wakes
 * waiters.  If the callback returns VCHIQ_RETRY the service is parked in
 * 'failstate' so the close can be retried later.
 */
2557 static VCHIQ_STATUS_T
2558 close_service_complete(struct vchiq_service *service, int failstate)
2560 VCHIQ_STATUS_T status;
2561 int is_server = (service->public_fourcc != VCHIQ_FOURCC_INVALID);
2564 switch (service->srvstate) {
2565 case VCHIQ_SRVSTATE_OPEN:
2566 case VCHIQ_SRVSTATE_CLOSESENT:
2567 case VCHIQ_SRVSTATE_CLOSERECVD:
2569 if (service->auto_close) {
2570 service->client_id = 0;
2571 service->remoteport = VCHIQ_PORT_FREE;
2572 newstate = VCHIQ_SRVSTATE_LISTENING;
2574 newstate = VCHIQ_SRVSTATE_CLOSEWAIT;
2576 newstate = VCHIQ_SRVSTATE_CLOSED;
2577 vchiq_set_service_state(service, newstate);
2579 case VCHIQ_SRVSTATE_LISTENING:
2582 vchiq_log_error(vchiq_core_log_level,
2583 "%s(%x) called in state %s", __func__,
2584 service->handle, srvstate_names[service->srvstate]);
2585 WARN(1, "%s in unexpected state\n", __func__);
2589 status = make_service_callback(service,
2590 VCHIQ_SERVICE_CLOSED, NULL, NULL);
2592 if (status != VCHIQ_RETRY) {
2593 int uc = service->service_use_count;
2595 /* Complete the close process */
2596 for (i = 0; i < uc; i++)
2597 /* cater for cases where close is forced and the
2598 ** client may not close all it's handles */
2599 vchiq_release_service_internal(service);
2601 service->client_id = 0;
2602 service->remoteport = VCHIQ_PORT_FREE;
2604 if (service->srvstate == VCHIQ_SRVSTATE_CLOSED)
2605 vchiq_free_service_internal(service);
2606 else if (service->srvstate != VCHIQ_SRVSTATE_CLOSEWAIT) {
2608 service->closing = 0;
2610 complete(&service->remove_event);
/* Callback asked for a retry: park in the caller-supplied state. */
2613 vchiq_set_service_state(service, failstate);
2618 /* Called by the slot handler */
/* Drive the service close state machine.  'close_recvd' is non-zero
 * when the peer initiated the close (a CLOSE message arrived).  May
 * return VCHIQ_RETRY, in which case the slot handler re-polls later.
 * NOTE(review): interior lines are elided in this listing; comments
 * describe only the visible statements. */
2620 vchiq_close_service_internal(struct vchiq_service *service, int close_recvd)
2622 struct vchiq_state *state = service->state;
2623 VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
2624 int is_server = (service->public_fourcc != VCHIQ_FOURCC_INVALID);
2626 vchiq_log_info(vchiq_core_log_level, "%d: csi:%d,%d (%s)",
2627 service->state->id, service->localport, close_recvd,
2628 srvstate_names[service->srvstate]);
2630 switch (service->srvstate) {
2631 case VCHIQ_SRVSTATE_CLOSED:
2632 case VCHIQ_SRVSTATE_HIDDEN:
2633 case VCHIQ_SRVSTATE_LISTENING:
2634 case VCHIQ_SRVSTATE_CLOSEWAIT:
/* Already closed (or never open): a received CLOSE here is
 * unexpected and logged as an error. */
2636 vchiq_log_error(vchiq_core_log_level,
2639 __func__, srvstate_names[service->srvstate]);
2640 else if (is_server) {
2641 if (service->srvstate == VCHIQ_SRVSTATE_LISTENING) {
2642 status = VCHIQ_ERROR;
2644 service->client_id = 0;
2645 service->remoteport = VCHIQ_PORT_FREE;
2646 if (service->srvstate ==
2647 VCHIQ_SRVSTATE_CLOSEWAIT)
2648 vchiq_set_service_state(service,
2649 VCHIQ_SRVSTATE_LISTENING);
2651 complete(&service->remove_event);
2653 vchiq_free_service_internal(service);
2655 case VCHIQ_SRVSTATE_OPENING:
2657 /* The open was rejected - tell the user */
2658 vchiq_set_service_state(service,
2659 VCHIQ_SRVSTATE_CLOSEWAIT);
2660 complete(&service->remove_event);
2662 /* Shutdown mid-open - let the other side know */
2663 status = queue_message(state, service,
2667 VCHIQ_MSG_DSTPORT(service->remoteport)),
2672 case VCHIQ_SRVSTATE_OPENSYNC:
/* Synchronous services serialise closing under sync_mutex. */
2673 mutex_lock(&state->sync_mutex);
2675 case VCHIQ_SRVSTATE_OPEN:
2677 if (!do_abort_bulks(service))
2678 status = VCHIQ_RETRY;
2681 release_service_messages(service);
/* Send our CLOSE to the peer, keeping slot_mutex held so the
 * state change below is atomic with the send. */
2683 if (status == VCHIQ_SUCCESS)
2684 status = queue_message(state, service,
2688 VCHIQ_MSG_DSTPORT(service->remoteport)),
2689 NULL, NULL, 0, QMFLAGS_NO_MUTEX_UNLOCK);
2691 if (status == VCHIQ_SUCCESS) {
2693 /* Change the state while the mutex is
2695 vchiq_set_service_state(service,
2696 VCHIQ_SRVSTATE_CLOSESENT);
2697 mutex_unlock(&state->slot_mutex);
2699 mutex_unlock(&state->sync_mutex);
2702 } else if (service->srvstate == VCHIQ_SRVSTATE_OPENSYNC) {
2703 mutex_unlock(&state->sync_mutex);
2708 /* Change the state while the mutex is still held */
2709 vchiq_set_service_state(service, VCHIQ_SRVSTATE_CLOSERECVD);
2710 mutex_unlock(&state->slot_mutex);
2712 mutex_unlock(&state->sync_mutex);
2714 status = close_service_complete(service,
2715 VCHIQ_SRVSTATE_CLOSERECVD);
2718 case VCHIQ_SRVSTATE_CLOSESENT:
2720 /* This happens when a process is killed mid-close */
2723 if (!do_abort_bulks(service)) {
2724 status = VCHIQ_RETRY;
2728 if (status == VCHIQ_SUCCESS)
2729 status = close_service_complete(service,
2730 VCHIQ_SRVSTATE_CLOSERECVD);
2733 case VCHIQ_SRVSTATE_CLOSERECVD:
2734 if (!close_recvd && is_server)
2735 /* Force into LISTENING mode */
2736 vchiq_set_service_state(service,
2737 VCHIQ_SRVSTATE_LISTENING);
2738 status = close_service_complete(service,
2739 VCHIQ_SRVSTATE_CLOSERECVD);
/* Unknown state: log and fall out. */
2743 vchiq_log_error(vchiq_core_log_level,
2744 "%s(%d) called in state %s", __func__,
2745 close_recvd, srvstate_names[service->srvstate]);
2752 /* Called from the application process upon process death */
/* Mark the service as closing and ask the slot handler (via a poll
 * request) to remove it; the actual teardown happens on the slot
 * handler thread. */
2754 vchiq_terminate_service_internal(struct vchiq_service *service)
2756 struct vchiq_state *state = service->state;
2758 vchiq_log_info(vchiq_core_log_level, "%d: tsi - (%d<->%d)",
2759 state->id, service->localport, service->remoteport);
2761 mark_service_closing(service);
2763 /* Mark the service for removal by the slot handler */
2764 request_poll(state, service, VCHIQ_POLL_REMOVE);
2767 /* Called from the slot handler */
/* Release a service slot: only legal from the quiescent states listed
 * below.  Moves the service to FREE, wakes anyone waiting on
 * remove_event, and drops the initial service reference. */
2769 vchiq_free_service_internal(struct vchiq_service *service)
2771 struct vchiq_state *state = service->state;
2773 vchiq_log_info(vchiq_core_log_level, "%d: fsi - (%d)",
2774 state->id, service->localport);
2776 switch (service->srvstate) {
2777 case VCHIQ_SRVSTATE_OPENING:
2778 case VCHIQ_SRVSTATE_CLOSED:
2779 case VCHIQ_SRVSTATE_HIDDEN:
2780 case VCHIQ_SRVSTATE_LISTENING:
2781 case VCHIQ_SRVSTATE_CLOSEWAIT:
/* Freeing in any other state is a logic error — log it. */
2784 vchiq_log_error(vchiq_core_log_level,
2785 "%d: fsi - (%d) in state %s",
2786 state->id, service->localport,
2787 srvstate_names[service->srvstate]);
2791 vchiq_set_service_state(service, VCHIQ_SRVSTATE_FREE);
2793 complete(&service->remove_event);
2795 /* Release the initial lock */
2796 unlock_service(service);
/* Connect this state to the peer: promote every HIDDEN service owned by
 * 'instance' to LISTENING, send a CONNECT message once (if still
 * disconnected), then wait for the peer's CONNECT before marking the
 * connection CONNECTED.  Returns VCHIQ_SUCCESS, or VCHIQ_RETRY on a
 * killable wait/queue interruption (retry paths elided in this view). */
2800 vchiq_connect_internal(struct vchiq_state *state, VCHIQ_INSTANCE_T instance)
2802 struct vchiq_service *service;
2805 /* Find all services registered to this client and enable them. */
2807 while ((service = next_service_by_instance(state, instance,
2809 if (service->srvstate == VCHIQ_SRVSTATE_HIDDEN)
2810 vchiq_set_service_state(service,
2811 VCHIQ_SRVSTATE_LISTENING);
2812 unlock_service(service);
2815 if (state->conn_state == VCHIQ_CONNSTATE_DISCONNECTED) {
2816 if (queue_message(state, NULL,
2817 VCHIQ_MAKE_MSG(VCHIQ_MSG_CONNECT, 0, 0), NULL, NULL,
2818 0, QMFLAGS_IS_BLOCKING) == VCHIQ_RETRY)
2821 vchiq_set_conn_state(state, VCHIQ_CONNSTATE_CONNECTING);
2824 if (state->conn_state == VCHIQ_CONNSTATE_CONNECTING) {
2825 if (wait_for_completion_killable(&state->connect))
2828 vchiq_set_conn_state(state, VCHIQ_CONNSTATE_CONNECTED);
/* Re-signal so any other waiter on state->connect also wakes. */
2829 complete(&state->connect);
2832 return VCHIQ_SUCCESS;
/* Shut down an instance: remove every service registered to it.
 * Remove failures are deliberately ignored (best-effort teardown). */
2836 vchiq_shutdown_internal(struct vchiq_state *state, VCHIQ_INSTANCE_T instance)
2838 struct vchiq_service *service;
2841 /* Find all services registered to this client and enable them. */
2843 while ((service = next_service_by_instance(state, instance,
2845 (void)vchiq_remove_service(service->handle);
2846 unlock_service(service);
2849 return VCHIQ_SUCCESS;
/* Request a connection pause.  Only valid from CONNECTED: sets the
 * conn_state to PAUSING and pokes the slot handler; any other state is
 * an error (logged and counted). */
2853 vchiq_pause_internal(struct vchiq_state *state)
2855 VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
2857 switch (state->conn_state) {
2858 case VCHIQ_CONNSTATE_CONNECTED:
2859 /* Request a pause */
2860 vchiq_set_conn_state(state, VCHIQ_CONNSTATE_PAUSING);
2861 request_poll(state, NULL, 0);
2864 vchiq_log_error(vchiq_core_log_level,
2866 __func__, conn_state_names[state->conn_state]);
2867 status = VCHIQ_ERROR;
2868 VCHIQ_STATS_INC(state, error_count);
/* Request a connection resume.  Only valid from PAUSED: sets RESUMING
 * and pokes the slot handler; otherwise an error is recorded. */
2876 vchiq_resume_internal(struct vchiq_state *state)
2878 VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
2880 if (state->conn_state == VCHIQ_CONNSTATE_PAUSED) {
2881 vchiq_set_conn_state(state, VCHIQ_CONNSTATE_RESUMING);
2882 request_poll(state, NULL, 0);
2884 status = VCHIQ_ERROR;
2885 VCHIQ_STATS_INC(state, error_count);
/* Public API: close a service by handle.  If called on the slot handler
 * thread the close is performed inline; otherwise the slot handler is
 * asked (via poll) to terminate it and this thread waits on
 * remove_event.  Returns VCHIQ_RETRY if the wait is interrupted, and
 * VCHIQ_ERROR if the service did not end up FREE or LISTENING. */
2892 vchiq_close_service(VCHIQ_SERVICE_HANDLE_T handle)
2894 /* Unregister the service */
2895 struct vchiq_service *service = find_service_by_handle(handle);
2896 VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
2901 vchiq_log_info(vchiq_core_log_level,
2902 "%d: close_service:%d",
2903 service->state->id, service->localport);
/* Nothing to close in these states — drop the lookup ref and bail. */
2905 if ((service->srvstate == VCHIQ_SRVSTATE_FREE) ||
2906 (service->srvstate == VCHIQ_SRVSTATE_LISTENING) ||
2907 (service->srvstate == VCHIQ_SRVSTATE_HIDDEN)) {
2908 unlock_service(service);
2912 mark_service_closing(service);
2914 if (current == service->state->slot_handler_thread) {
2915 status = vchiq_close_service_internal(service,
2917 WARN_ON(status == VCHIQ_RETRY);
2919 /* Mark the service for termination by the slot handler */
2920 request_poll(service->state, service, VCHIQ_POLL_TERMINATE);
/* Wait (killably) until the close settles in a terminal state;
 * loop structure around this wait is elided in this listing. */
2924 if (wait_for_completion_killable(&service->remove_event)) {
2925 status = VCHIQ_RETRY;
2929 if ((service->srvstate == VCHIQ_SRVSTATE_FREE) ||
2930 (service->srvstate == VCHIQ_SRVSTATE_LISTENING) ||
2931 (service->srvstate == VCHIQ_SRVSTATE_OPEN))
2934 vchiq_log_warning(vchiq_core_log_level,
2935 "%d: close_service:%d - waiting in state %s",
2936 service->state->id, service->localport,
2937 srvstate_names[service->srvstate]);
2940 if ((status == VCHIQ_SUCCESS) &&
2941 (service->srvstate != VCHIQ_SRVSTATE_FREE) &&
2942 (service->srvstate != VCHIQ_SRVSTATE_LISTENING))
2943 status = VCHIQ_ERROR;
2945 unlock_service(service);
/* Public API: remove a service by handle.  Like vchiq_close_service()
 * but the service must not be left LISTENING — servers are disguised
 * as clients (public_fourcc cleared) so the close fully removes them.
 * Returns VCHIQ_RETRY on interrupted wait, VCHIQ_ERROR if the service
 * is not FREE afterwards. */
2951 vchiq_remove_service(VCHIQ_SERVICE_HANDLE_T handle)
2953 /* Unregister the service */
2954 struct vchiq_service *service = find_service_by_handle(handle);
2955 VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
2960 vchiq_log_info(vchiq_core_log_level,
2961 "%d: remove_service:%d",
2962 service->state->id, service->localport);
2964 if (service->srvstate == VCHIQ_SRVSTATE_FREE) {
2965 unlock_service(service);
2969 mark_service_closing(service);
2971 if ((service->srvstate == VCHIQ_SRVSTATE_HIDDEN) ||
2972 (current == service->state->slot_handler_thread)) {
2973 /* Make it look like a client, because it must be removed and
2974 not left in the LISTENING state. */
2975 service->public_fourcc = VCHIQ_FOURCC_INVALID;
2977 status = vchiq_close_service_internal(service,
2979 WARN_ON(status == VCHIQ_RETRY);
2981 /* Mark the service for removal by the slot handler */
2982 request_poll(service->state, service, VCHIQ_POLL_REMOVE);
/* Wait (killably) until removal settles; surrounding loop elided
 * in this listing. */
2985 if (wait_for_completion_killable(&service->remove_event)) {
2986 status = VCHIQ_RETRY;
2990 if ((service->srvstate == VCHIQ_SRVSTATE_FREE) ||
2991 (service->srvstate == VCHIQ_SRVSTATE_OPEN))
2994 vchiq_log_warning(vchiq_core_log_level,
2995 "%d: remove_service:%d - waiting in state %s",
2996 service->state->id, service->localport,
2997 srvstate_names[service->srvstate]);
3000 if ((status == VCHIQ_SUCCESS) &&
3001 (service->srvstate != VCHIQ_SRVSTATE_FREE))
3002 status = VCHIQ_ERROR;
3004 unlock_service(service);
3009 /* This function may be called by kernel threads or user threads.
3010 * User threads may receive VCHIQ_RETRY to indicate that a signal has been
3011 * received and the call should be retried after being returned to user
3013 * When called in blocking mode, the userdata field points to a bulk_waiter
3016 VCHIQ_STATUS_T vchiq_bulk_transfer(VCHIQ_SERVICE_HANDLE_T handle,
3017 void *offset, int size, void *userdata,
3018 VCHIQ_BULK_MODE_T mode,
3019 VCHIQ_BULK_DIR_T dir)
3021 struct vchiq_service *service = find_service_by_handle(handle);
3022 struct vchiq_bulk_queue *queue;
3023 struct vchiq_bulk *bulk;
3024 struct vchiq_state *state;
3025 struct bulk_waiter *bulk_waiter = NULL;
3026 const char dir_char = (dir == VCHIQ_BULK_TRANSMIT) ? 't' : 'r';
3027 const int dir_msgtype = (dir == VCHIQ_BULK_TRANSMIT) ?
3028 VCHIQ_MSG_BULK_TX : VCHIQ_MSG_BULK_RX;
3029 VCHIQ_STATUS_T status = VCHIQ_ERROR;
/* Validate: service must exist, be OPEN, have a data pointer, and
 * pass the instance check. */
3032 if (!service || service->srvstate != VCHIQ_SRVSTATE_OPEN ||
3033 !offset || vchiq_check_service(service) != VCHIQ_SUCCESS)
3037 case VCHIQ_BULK_MODE_NOCALLBACK:
3038 case VCHIQ_BULK_MODE_CALLBACK:
3040 case VCHIQ_BULK_MODE_BLOCKING:
/* BLOCKING: userdata carries a caller-allocated bulk_waiter to
 * park on until the transfer completes. */
3041 bulk_waiter = (struct bulk_waiter *)userdata;
3042 init_completion(&bulk_waiter->event);
3043 bulk_waiter->actual = 0;
3044 bulk_waiter->bulk = NULL;
3046 case VCHIQ_BULK_MODE_WAITING:
/* WAITING: resume a previously-interrupted blocking transfer. */
3047 bulk_waiter = (struct bulk_waiter *)userdata;
3048 bulk = bulk_waiter->bulk;
3054 state = service->state;
3056 queue = (dir == VCHIQ_BULK_TRANSMIT) ?
3057 &service->bulk_tx : &service->bulk_rx;
3059 if (mutex_lock_killable(&service->bulk_mutex) != 0) {
3060 status = VCHIQ_RETRY;
/* Bulk queue full: stall until the remote side removes an entry,
 * re-taking the mutex each time around. */
3064 if (queue->local_insert == queue->remove + VCHIQ_NUM_SERVICE_BULKS) {
3065 VCHIQ_SERVICE_STATS_INC(service, bulk_stalls);
3067 mutex_unlock(&service->bulk_mutex);
3068 if (wait_for_completion_killable(
3069 &service->bulk_remove_event)) {
3070 status = VCHIQ_RETRY;
3073 if (mutex_lock_killable(&service->bulk_mutex)
3075 status = VCHIQ_RETRY;
3078 } while (queue->local_insert == queue->remove +
3079 VCHIQ_NUM_SERVICE_BULKS);
3082 bulk = &queue->bulks[BULK_INDEX(queue->local_insert)];
3086 bulk->userdata = userdata;
/* Pre-set ABORTED; overwritten with the real count on completion. */
3088 bulk->actual = VCHIQ_BULK_ACTUAL_ABORTED;
3090 if (vchiq_prepare_bulk_data(bulk, offset, size, dir) != VCHIQ_SUCCESS)
3091 goto unlock_error_exit;
3095 vchiq_log_info(vchiq_core_log_level,
3096 "%d: bt (%d->%d) %cx %x@%pK %pK",
3097 state->id, service->localport, service->remoteport, dir_char,
3098 size, bulk->data, userdata);
3100 /* The slot mutex must be held when the service is being closed, so
3101 claim it here to ensure that isn't happening */
3102 if (mutex_lock_killable(&state->slot_mutex) != 0) {
3103 status = VCHIQ_RETRY;
3104 goto cancel_bulk_error_exit;
3107 if (service->srvstate != VCHIQ_SRVSTATE_OPEN)
3108 goto unlock_both_error_exit;
/* Tell the peer about the new bulk: payload is {data ptr, size}. */
3110 payload[0] = (int)(long)bulk->data;
3111 payload[1] = bulk->size;
3112 status = queue_message(state,
3114 VCHIQ_MAKE_MSG(dir_msgtype,
3116 service->remoteport),
3117 memcpy_copy_callback,
3120 QMFLAGS_IS_BLOCKING |
3121 QMFLAGS_NO_MUTEX_LOCK |
3122 QMFLAGS_NO_MUTEX_UNLOCK);
3123 if (status != VCHIQ_SUCCESS) {
3124 goto unlock_both_error_exit;
3127 queue->local_insert++;
3129 mutex_unlock(&state->slot_mutex);
3130 mutex_unlock(&service->bulk_mutex);
3132 vchiq_log_trace(vchiq_core_log_level,
3133 "%d: bt:%d %cx li=%x ri=%x p=%x",
3135 service->localport, dir_char,
3136 queue->local_insert, queue->remote_insert, queue->process);
3139 unlock_service(service);
3141 status = VCHIQ_SUCCESS;
/* Blocking/waiting modes: park on the waiter until completion;
 * an interrupted wait yields VCHIQ_RETRY, an aborted bulk ERROR. */
3144 bulk_waiter->bulk = bulk;
3145 if (wait_for_completion_killable(&bulk_waiter->event))
3146 status = VCHIQ_RETRY;
3147 else if (bulk_waiter->actual == VCHIQ_BULK_ACTUAL_ABORTED)
3148 status = VCHIQ_ERROR;
/* Error unwind: release locks in acquisition order and cancel the
 * prepared bulk where necessary. */
3153 unlock_both_error_exit:
3154 mutex_unlock(&state->slot_mutex);
3155 cancel_bulk_error_exit:
3156 vchiq_complete_bulk(bulk);
3158 mutex_unlock(&service->bulk_mutex);
3162 unlock_service(service);
/* Public API: queue a DATA message on a service.  The payload is
 * produced by 'copy_callback' directly into the slot.  Messages larger
 * than VCHIQ_MAX_MSG_SIZE are rejected.  OPEN services use the normal
 * queue; OPENSYNC services use the synchronous channel. */
3167 vchiq_queue_message(VCHIQ_SERVICE_HANDLE_T handle,
3168 ssize_t (*copy_callback)(void *context, void *dest,
3169 size_t offset, size_t maxsize),
3173 struct vchiq_service *service = find_service_by_handle(handle);
3174 VCHIQ_STATUS_T status = VCHIQ_ERROR;
3177 (vchiq_check_service(service) != VCHIQ_SUCCESS))
3181 VCHIQ_SERVICE_STATS_INC(service, error_count);
3186 if (size > VCHIQ_MAX_MSG_SIZE) {
3187 VCHIQ_SERVICE_STATS_INC(service, error_count);
3191 switch (service->srvstate) {
3192 case VCHIQ_SRVSTATE_OPEN:
3193 status = queue_message(service->state, service,
3194 VCHIQ_MAKE_MSG(VCHIQ_MSG_DATA,
3196 service->remoteport),
3197 copy_callback, context, size, 1);
3199 case VCHIQ_SRVSTATE_OPENSYNC:
3200 status = queue_message_sync(service->state, service,
3201 VCHIQ_MAKE_MSG(VCHIQ_MSG_DATA,
3203 service->remoteport),
3204 copy_callback, context, size, 1);
/* Any other service state cannot accept messages. */
3207 status = VCHIQ_ERROR;
3213 unlock_service(service);
/* Public API: release a received message header back to the remote's
 * slot pool.  Claimed messages in the remote slot range release their
 * slot; messages from the sync slot go through release_message_sync. */
3219 vchiq_release_message(VCHIQ_SERVICE_HANDLE_T handle,
3220 struct vchiq_header *header)
3222 struct vchiq_service *service = find_service_by_handle(handle);
3223 struct vchiq_shared_state *remote;
3224 struct vchiq_state *state;
3230 state = service->state;
3231 remote = state->remote;
3233 slot_index = SLOT_INDEX_FROM_DATA(state, (void *)header);
3235 if ((slot_index >= remote->slot_first) &&
3236 (slot_index <= remote->slot_last)) {
3237 int msgid = header->msgid;
3239 if (msgid & VCHIQ_MSGID_CLAIMED) {
3240 struct vchiq_slot_info *slot_info =
3241 SLOT_INFO_FROM_INDEX(state, slot_index);
3243 release_slot(state, slot_info, header, service);
3245 } else if (slot_index == remote->slot_sync)
3246 release_message_sync(state, header);
3248 unlock_service(service);
/* Release a synchronous-channel message: mark the header as padding and
 * signal the remote that its sync slot is free again. */
3252 release_message_sync(struct vchiq_state *state, struct vchiq_header *header)
3254 header->msgid = VCHIQ_MSGID_PADDING;
3255 remote_event_signal(&state->remote->sync_release);
/* Public API: report the protocol version negotiated with the peer for
 * this service.  Fails if the handle/service/output pointer is invalid. */
3259 vchiq_get_peer_version(VCHIQ_SERVICE_HANDLE_T handle, short *peer_version)
3261 VCHIQ_STATUS_T status = VCHIQ_ERROR;
3262 struct vchiq_service *service = find_service_by_handle(handle);
3265 (vchiq_check_service(service) != VCHIQ_SUCCESS) ||
3268 *peer_version = service->peer_version;
3269 status = VCHIQ_SUCCESS;
3273 unlock_service(service);
/* Public API: fill in the static VCHIQ configuration limits. */
3277 void vchiq_get_config(struct vchiq_config *config)
3279 config->max_msg_size = VCHIQ_MAX_MSG_SIZE;
3280 config->bulk_threshold = VCHIQ_MAX_MSG_SIZE;
3281 config->max_outstanding_bulks = VCHIQ_NUM_SERVICE_BULKS;
3282 config->max_services = VCHIQ_MAX_SERVICES;
3283 config->version = VCHIQ_VERSION;
3284 config->version_min = VCHIQ_VERSION_MIN;
/* Public API: set a per-service option (auto-close, slot/message quota,
 * synchronous mode, tracing).  Quota updates may wake a service that
 * has just dropped below its new quota. */
3288 vchiq_set_service_option(VCHIQ_SERVICE_HANDLE_T handle,
3289 VCHIQ_SERVICE_OPTION_T option, int value)
3291 struct vchiq_service *service = find_service_by_handle(handle);
3292 VCHIQ_STATUS_T status = VCHIQ_ERROR;
3296 case VCHIQ_SERVICE_OPTION_AUTOCLOSE:
3297 service->auto_close = value;
3298 status = VCHIQ_SUCCESS;
3301 case VCHIQ_SERVICE_OPTION_SLOT_QUOTA: {
3302 struct vchiq_service_quota *service_quota =
3303 &service->state->service_quotas[
3304 service->localport];
/* value of 0 (guard elided in this view) selects the default. */
3306 value = service->state->default_slot_quota;
3307 if ((value >= service_quota->slot_use_count) &&
3308 (value < (unsigned short)~0)) {
3309 service_quota->slot_quota = value;
3310 if ((value >= service_quota->slot_use_count) &&
3311 (service_quota->message_quota >=
3312 service_quota->message_use_count)) {
3313 /* Signal the service that it may have
3314 ** dropped below its quota */
3315 complete(&service_quota->quota_event);
3317 status = VCHIQ_SUCCESS;
3321 case VCHIQ_SERVICE_OPTION_MESSAGE_QUOTA: {
3322 struct vchiq_service_quota *service_quota =
3323 &service->state->service_quotas[
3324 service->localport];
3326 value = service->state->default_message_quota;
3327 if ((value >= service_quota->message_use_count) &&
3328 (value < (unsigned short)~0)) {
3329 service_quota->message_quota = value;
3331 service_quota->message_use_count) &&
3332 (service_quota->slot_quota >=
3333 service_quota->slot_use_count))
3334 /* Signal the service that it may have
3335 ** dropped below its quota */
3336 complete(&service_quota->quota_event);
3337 status = VCHIQ_SUCCESS;
3341 case VCHIQ_SERVICE_OPTION_SYNCHRONOUS:
/* Sync mode can only be toggled before the service is open. */
3342 if ((service->srvstate == VCHIQ_SRVSTATE_HIDDEN) ||
3343 (service->srvstate ==
3344 VCHIQ_SRVSTATE_LISTENING)) {
3345 service->sync = value;
3346 status = VCHIQ_SUCCESS;
3350 case VCHIQ_SERVICE_OPTION_TRACE:
3351 service->trace = value;
3352 status = VCHIQ_SUCCESS;
3358 unlock_service(service);
/* Dump one side's shared state (slot range, tx position, recycle
 * queue), any slots with mismatched use/release counts, and the debug
 * counters, into the dump context. */
3365 vchiq_dump_shared_state(void *dump_context, struct vchiq_state *state,
3366 struct vchiq_shared_state *shared, const char *label)
3368 static const char *const debug_names[] = {
3370 "SLOT_HANDLER_COUNT",
3371 "SLOT_HANDLER_LINE",
3375 "AWAIT_COMPLETION_LINE",
3376 "DEQUEUE_MESSAGE_LINE",
3377 "SERVICE_CALLBACK_LINE",
3378 "MSG_QUEUE_FULL_COUNT",
3379 "COMPLETION_QUEUE_FULL_COUNT"
3385 len = snprintf(buf, sizeof(buf),
3386 " %s: slots %d-%d tx_pos=%x recycle=%x",
3387 label, shared->slot_first, shared->slot_last,
3388 shared->tx_pos, shared->slot_queue_recycle);
3389 vchiq_dump(dump_context, buf, len + 1);
3391 len = snprintf(buf, sizeof(buf),
3393 vchiq_dump(dump_context, buf, len + 1);
/* Report only slots that are still in use (use != release). */
3395 for (i = shared->slot_first; i <= shared->slot_last; i++) {
3396 struct vchiq_slot_info slot_info =
3397 *SLOT_INFO_FROM_INDEX(state, i);
3398 if (slot_info.use_count != slot_info.release_count) {
3399 len = snprintf(buf, sizeof(buf),
3400 " %d: %d/%d", i, slot_info.use_count,
3401 slot_info.release_count);
3402 vchiq_dump(dump_context, buf, len + 1);
3406 for (i = 1; i < shared->debug[DEBUG_ENTRIES]; i++) {
3407 len = snprintf(buf, sizeof(buf), " DEBUG: %s = %d(%x)",
3408 debug_names[i], shared->debug[i], shared->debug[i]);
3409 vchiq_dump(dump_context, buf, len + 1);
/* Dump the overall connection state: conn state, tx/rx positions,
 * version, global stats, slot accounting, platform state, both shared
 * states, and every service in use. */
3414 vchiq_dump_state(void *dump_context, struct vchiq_state *state)
3420 len = snprintf(buf, sizeof(buf), "State %d: %s", state->id,
3421 conn_state_names[state->conn_state]);
3422 vchiq_dump(dump_context, buf, len + 1);
3424 len = snprintf(buf, sizeof(buf),
3425 " tx_pos=%x(@%pK), rx_pos=%x(@%pK)",
3426 state->local->tx_pos,
3427 state->tx_data + (state->local_tx_pos & VCHIQ_SLOT_MASK),
3429 state->rx_data + (state->rx_pos & VCHIQ_SLOT_MASK));
3430 vchiq_dump(dump_context, buf, len + 1);
3432 len = snprintf(buf, sizeof(buf),
3433 " Version: %d (min %d)",
3434 VCHIQ_VERSION, VCHIQ_VERSION_MIN);
3435 vchiq_dump(dump_context, buf, len + 1);
3437 if (VCHIQ_ENABLE_STATS) {
3438 len = snprintf(buf, sizeof(buf),
3439 " Stats: ctrl_tx_count=%d, ctrl_rx_count=%d, "
3441 state->stats.ctrl_tx_count, state->stats.ctrl_rx_count,
3442 state->stats.error_count);
3443 vchiq_dump(dump_context, buf, len + 1);
3446 len = snprintf(buf, sizeof(buf),
3447 " Slots: %d available (%d data), %d recyclable, %d stalls "
3449 ((state->slot_queue_available * VCHIQ_SLOT_SIZE) -
3450 state->local_tx_pos) / VCHIQ_SLOT_SIZE,
3451 state->data_quota - state->data_use_count,
3452 state->local->slot_queue_recycle - state->slot_queue_available,
3453 state->stats.slot_stalls, state->stats.data_stalls);
3454 vchiq_dump(dump_context, buf, len + 1);
3456 vchiq_dump_platform_state(dump_context);
3458 vchiq_dump_shared_state(dump_context, state, state->local, "Local");
3459 vchiq_dump_shared_state(dump_context, state, state->remote, "Remote");
3461 vchiq_dump_platform_instances(dump_context);
/* Walk every allocated service port and dump each live service. */
3463 for (i = 0; i < state->unused_service; i++) {
3464 struct vchiq_service *service = find_service_by_port(state, i);
3467 vchiq_dump_service_state(dump_context, service);
3468 unlock_service(service);
/* Dump one service: state, fourcc, remote port/client, quota usage,
 * pending bulk counts, and (if enabled) the per-service statistics. */
3474 vchiq_dump_service_state(void *dump_context, struct vchiq_service *service)
3479 len = snprintf(buf, sizeof(buf), "Service %u: %s (ref %u)",
3480 service->localport, srvstate_names[service->srvstate],
3481 service->ref_count - 1); /*Don't include the lock just taken*/
3483 if (service->srvstate != VCHIQ_SRVSTATE_FREE) {
3484 char remoteport[30];
3485 struct vchiq_service_quota *service_quota =
3486 &service->state->service_quotas[service->localport];
3487 int fourcc = service->base.fourcc;
3488 int tx_pending, rx_pending;
3490 if (service->remoteport != VCHIQ_PORT_FREE) {
3491 int len2 = snprintf(remoteport, sizeof(remoteport),
3492 "%u", service->remoteport);
3494 if (service->public_fourcc != VCHIQ_FOURCC_INVALID)
3495 snprintf(remoteport + len2,
3496 sizeof(remoteport) - len2,
3497 " (client %x)", service->client_id);
3499 strcpy(remoteport, "n/a");
3501 len += snprintf(buf + len, sizeof(buf) - len,
3502 " '%c%c%c%c' remote %s (msg use %d/%d, slot use %d/%d)",
3503 VCHIQ_FOURCC_AS_4CHARS(fourcc),
3505 service_quota->message_use_count,
3506 service_quota->message_quota,
3507 service_quota->slot_use_count,
3508 service_quota->slot_quota);
3510 vchiq_dump(dump_context, buf, len + 1);
/* Pending bulks = local inserts not yet matched by the remote. */
3512 tx_pending = service->bulk_tx.local_insert -
3513 service->bulk_tx.remote_insert;
3515 rx_pending = service->bulk_rx.local_insert -
3516 service->bulk_rx.remote_insert;
3518 len = snprintf(buf, sizeof(buf),
3519 " Bulk: tx_pending=%d (size %d),"
3520 " rx_pending=%d (size %d)",
3522 tx_pending ? service->bulk_tx.bulks[
3523 BULK_INDEX(service->bulk_tx.remove)].size : 0,
3525 rx_pending ? service->bulk_rx.bulks[
3526 BULK_INDEX(service->bulk_rx.remove)].size : 0);
3528 if (VCHIQ_ENABLE_STATS) {
3529 vchiq_dump(dump_context, buf, len + 1);
3531 len = snprintf(buf, sizeof(buf),
3532 " Ctrl: tx_count=%d, tx_bytes=%llu, "
3533 "rx_count=%d, rx_bytes=%llu",
3534 service->stats.ctrl_tx_count,
3535 service->stats.ctrl_tx_bytes,
3536 service->stats.ctrl_rx_count,
3537 service->stats.ctrl_rx_bytes);
3538 vchiq_dump(dump_context, buf, len + 1);
3540 len = snprintf(buf, sizeof(buf),
3541 " Bulk: tx_count=%d, tx_bytes=%llu, "
3542 "rx_count=%d, rx_bytes=%llu",
3543 service->stats.bulk_tx_count,
3544 service->stats.bulk_tx_bytes,
3545 service->stats.bulk_rx_count,
3546 service->stats.bulk_rx_bytes);
3547 vchiq_dump(dump_context, buf, len + 1);
3549 len = snprintf(buf, sizeof(buf),
3550 " %d quota stalls, %d slot stalls, "
3551 "%d bulk stalls, %d aborted, %d errors",
3552 service->stats.quota_stalls,
3553 service->stats.slot_stalls,
3554 service->stats.bulk_stalls,
3555 service->stats.bulk_aborted_count,
3556 service->stats.error_count);
3560 vchiq_dump(dump_context, buf, len + 1);
3562 if (service->srvstate != VCHIQ_SRVSTATE_FREE)
3563 vchiq_dump_platform_service_state(dump_context, service);
/* Print an attention-grabbing banner before a serious error report. */
3567 vchiq_loud_error_header(void)
3569 vchiq_log_error(vchiq_core_log_level,
3570 "============================================================"
3571 "================");
3572 vchiq_log_error(vchiq_core_log_level,
3573 "============================================================"
3574 "================");
3575 vchiq_log_error(vchiq_core_log_level, "=====");
/* Closing counterpart to vchiq_loud_error_header(). */
3579 vchiq_loud_error_footer(void)
3581 vchiq_log_error(vchiq_core_log_level, "=====");
3582 vchiq_log_error(vchiq_core_log_level,
3583 "============================================================"
3584 "================");
3585 vchiq_log_error(vchiq_core_log_level,
3586 "============================================================"
3587 "================");
/* Send a REMOTE_USE control message to the peer; returns VCHIQ_RETRY
 * if the connection is still DISCONNECTED. */
3590 VCHIQ_STATUS_T vchiq_send_remote_use(struct vchiq_state *state)
3592 VCHIQ_STATUS_T status = VCHIQ_RETRY;
3594 if (state->conn_state != VCHIQ_CONNSTATE_DISCONNECTED)
3595 status = queue_message(state, NULL,
3596 VCHIQ_MAKE_MSG(VCHIQ_MSG_REMOTE_USE, 0, 0),
/* Send a REMOTE_RELEASE control message to the peer; VCHIQ_RETRY while
 * disconnected. */
3601 VCHIQ_STATUS_T vchiq_send_remote_release(struct vchiq_state *state)
3603 VCHIQ_STATUS_T status = VCHIQ_RETRY;
3605 if (state->conn_state != VCHIQ_CONNSTATE_DISCONNECTED)
3606 status = queue_message(state, NULL,
3607 VCHIQ_MAKE_MSG(VCHIQ_MSG_REMOTE_RELEASE, 0, 0),
/* Send a REMOTE_USE_ACTIVE control message to the peer; VCHIQ_RETRY
 * while disconnected. */
3612 VCHIQ_STATUS_T vchiq_send_remote_use_active(struct vchiq_state *state)
3614 VCHIQ_STATUS_T status = VCHIQ_RETRY;
3616 if (state->conn_state != VCHIQ_CONNSTATE_DISCONNECTED)
3617 status = queue_message(state, NULL,
3618 VCHIQ_MAKE_MSG(VCHIQ_MSG_REMOTE_USE_ACTIVE, 0, 0),
3623 void vchiq_log_dump_mem(const char *label, u32 addr, const void *void_mem,
3626 const u8 *mem = (const u8 *)void_mem;
3631 while (num_bytes > 0) {
3634 for (offset = 0; offset < 16; offset++) {
3635 if (offset < num_bytes)
3636 s += snprintf(s, 4, "%02x ", mem[offset]);
3638 s += snprintf(s, 4, " ");
3641 for (offset = 0; offset < 16; offset++) {
3642 if (offset < num_bytes) {
3643 u8 ch = mem[offset];
3645 if ((ch < ' ') || (ch > '~'))
3652 if ((label != NULL) && (*label != '\0'))
3653 vchiq_log_trace(VCHIQ_LOG_TRACE,
3654 "%s: %08x: %s", label, addr, line_buf);
3656 vchiq_log_trace(VCHIQ_LOG_TRACE,
3657 "%08x: %s", addr, line_buf);