2 * Copyright (c) 2010-2012 Broadcom. All rights reserved.
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions, and the following disclaimer,
9 * without modification.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * 3. The names of the above-listed copyright holders may not be used
14 * to endorse or promote products derived from this software without
15 * specific prior written permission.
17 * ALTERNATIVELY, this software may be distributed under the terms of the
18 * GNU General Public License ("GPL") version 2, as published by the Free
19 * Software Foundation.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
22 * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
23 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
24 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
25 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
26 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
27 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
28 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
29 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
30 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
31 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
34 #include "vchiq_core.h"
/* Stack size for the slot handler kernel thread. */
36 #define VCHIQ_SLOT_HANDLER_STACK 8192
/* Bit position where the per-handle state/sequence field begins. */
38 #define HANDLE_STATE_SHIFT 12
/* Translate between slot indices and the slot_info / slot_data arrays. */
40 #define SLOT_INFO_FROM_INDEX(state, index) (state->slot_info + (index))
41 #define SLOT_DATA_FROM_INDEX(state, index) (state->slot_data + (index))
/* NOTE(review): the divisor line of this macro (presumably VCHIQ_SLOT_SIZE)
 * is missing from this view of the file — confirm against upstream. */
42 #define SLOT_INDEX_FROM_DATA(state, data) \
43 (((unsigned int)((char *)data - (char *)state->slot_data)) / \
45 #define SLOT_INDEX_FROM_INFO(state, info) \
46 ((unsigned int)(info - state->slot_info))
/* Convert a byte position in the transmit stream into a slot-queue index. */
47 #define SLOT_QUEUE_INDEX_FROM_POS(pos) \
48 ((int)((unsigned int)(pos) / VCHIQ_SLOT_SIZE))
/* Wrap a bulk sequence number into the fixed-size bulk ring. */
50 #define BULK_INDEX(x) (x & (VCHIQ_NUM_SERVICE_BULKS - 1))
/* Per-service trace overrides: a traced service logs at TRACE level. */
52 #define SRVTRACE_LEVEL(srv) \
53 (((srv) && (srv)->trace) ? VCHIQ_LOG_TRACE : vchiq_core_msg_log_level)
54 #define SRVTRACE_ENABLED(srv, lev) \
55 (((srv) && (srv)->trace) || (vchiq_core_msg_log_level >= (lev)))
/* Wire payload of an OPEN message (fields not visible in this view). */
57 struct vchiq_open_payload {
/* Wire payload of an OPENACK message (fields not visible in this view). */
64 struct vchiq_openack_payload {
/* queue_message() flag bits controlling blocking and slot_mutex handling. */
69 QMFLAGS_IS_BLOCKING = (1 << 0),
70 QMFLAGS_NO_MUTEX_LOCK = (1 << 1),
71 QMFLAGS_NO_MUTEX_UNLOCK = (1 << 2)
74 /* we require this for consistency between endpoints */
75 vchiq_static_assert(sizeof(struct vchiq_header) == 8);
76 vchiq_static_assert(IS_POW2(sizeof(struct vchiq_header)));
77 vchiq_static_assert(IS_POW2(VCHIQ_NUM_CURRENT_BULKS));
78 vchiq_static_assert(IS_POW2(VCHIQ_NUM_SERVICE_BULKS));
79 vchiq_static_assert(IS_POW2(VCHIQ_MAX_SERVICES));
80 vchiq_static_assert(VCHIQ_VERSION >= VCHIQ_VERSION_MIN);
82 /* Run time control of log level, based on KERN_XXX level. */
83 int vchiq_core_log_level = VCHIQ_LOG_DEFAULT;
84 int vchiq_core_msg_log_level = VCHIQ_LOG_DEFAULT;
85 int vchiq_sync_log_level = VCHIQ_LOG_DEFAULT;
/* service_spinlock guards service lookup and ref_count manipulation;
 * quota_spinlock guards message/slot quota accounting. */
87 static DEFINE_SPINLOCK(service_spinlock);
88 DEFINE_SPINLOCK(bulk_waiter_spinlock);
89 static DEFINE_SPINLOCK(quota_spinlock);
/* Registry of VCHIQ state instances, plus a sequence counter used when
 * minting service handles. */
91 struct vchiq_state *vchiq_states[VCHIQ_MAX_STATES];
92 static unsigned int handle_seq;
/* Human-readable names for srvstate / reason / conn_state enums, indexed
 * by enum value (several table entries are missing from this view). */
94 static const char *const srvstate_names[] = {
107 static const char *const reason_names[] = {
111 "BULK_TRANSMIT_DONE",
113 "BULK_TRANSMIT_ABORTED",
114 "BULK_RECEIVE_ABORTED"
117 static const char *const conn_state_names[] = {
/* Forward declaration: defined later in the file. */
130 release_message_sync(struct vchiq_state *state, struct vchiq_header *header);
/* Map a VCHIQ message type to a human-readable name, for log output.
 * NOTE(review): the switch header and default case are missing from this
 * view of the file. */
132 static const char *msg_type_str(unsigned int msg_type)
135 case VCHIQ_MSG_PADDING: return "PADDING";
136 case VCHIQ_MSG_CONNECT: return "CONNECT";
137 case VCHIQ_MSG_OPEN: return "OPEN";
138 case VCHIQ_MSG_OPENACK: return "OPENACK";
139 case VCHIQ_MSG_CLOSE: return "CLOSE";
140 case VCHIQ_MSG_DATA: return "DATA";
141 case VCHIQ_MSG_BULK_RX: return "BULK_RX";
142 case VCHIQ_MSG_BULK_TX: return "BULK_TX";
143 case VCHIQ_MSG_BULK_RX_DONE: return "BULK_RX_DONE";
144 case VCHIQ_MSG_BULK_TX_DONE: return "BULK_TX_DONE";
145 case VCHIQ_MSG_PAUSE: return "PAUSE";
146 case VCHIQ_MSG_RESUME: return "RESUME";
147 case VCHIQ_MSG_REMOTE_USE: return "REMOTE_USE";
148 case VCHIQ_MSG_REMOTE_RELEASE: return "REMOTE_RELEASE";
149 case VCHIQ_MSG_REMOTE_USE_ACTIVE: return "REMOTE_USE_ACTIVE";
/* Record a service state transition, logging the old and new state names. */
155 vchiq_set_service_state(struct vchiq_service *service, int newstate)
157 vchiq_log_info(vchiq_core_log_level, "%d: srv:%d %s->%s",
158 service->state->id, service->localport,
159 srvstate_names[service->srvstate],
160 srvstate_names[newstate]);
161 service->srvstate = newstate;
/* Look up a live service by handle under service_spinlock, taking a
 * reference on success. Logs and (presumably) returns NULL on failure —
 * the return statements are missing from this view. */
164 struct vchiq_service *
165 find_service_by_handle(VCHIQ_SERVICE_HANDLE_T handle)
167 struct vchiq_service *service;
169 spin_lock(&service_spinlock);
170 service = handle_to_service(handle);
171 if (service && (service->srvstate != VCHIQ_SRVSTATE_FREE) &&
172 (service->handle == handle)) {
173 WARN_ON(service->ref_count == 0);
174 service->ref_count++;
177 spin_unlock(&service_spinlock);
180 vchiq_log_info(vchiq_core_log_level,
181 "Invalid service handle 0x%x", handle);
/* Look up a live service by local port number, bounds-checked against
 * VCHIQ_PORT_MAX, taking a reference under service_spinlock on success. */
186 struct vchiq_service *
187 find_service_by_port(struct vchiq_state *state, int localport)
189 struct vchiq_service *service = NULL;
191 if ((unsigned int)localport <= VCHIQ_PORT_MAX) {
192 spin_lock(&service_spinlock);
193 service = state->services[localport];
194 if (service && (service->srvstate != VCHIQ_SRVSTATE_FREE)) {
195 WARN_ON(service->ref_count == 0);
196 service->ref_count++;
199 spin_unlock(&service_spinlock);
203 vchiq_log_info(vchiq_core_log_level,
204 "Invalid port %d", localport);
/* As find_service_by_handle(), but additionally requires the service to
 * belong to the given instance. */
209 struct vchiq_service *
210 find_service_for_instance(VCHIQ_INSTANCE_T instance,
211 VCHIQ_SERVICE_HANDLE_T handle)
213 struct vchiq_service *service;
215 spin_lock(&service_spinlock);
216 service = handle_to_service(handle);
217 if (service && (service->srvstate != VCHIQ_SRVSTATE_FREE) &&
218 (service->handle == handle) &&
219 (service->instance == instance)) {
220 WARN_ON(service->ref_count == 0);
221 service->ref_count++;
224 spin_unlock(&service_spinlock);
227 vchiq_log_info(vchiq_core_log_level,
228 "Invalid service handle 0x%x", handle);
/* Variant of find_service_for_instance() that accepts services in the
 * FREE or CLOSED states, used during teardown. */
233 struct vchiq_service *
234 find_closed_service_for_instance(VCHIQ_INSTANCE_T instance,
235 VCHIQ_SERVICE_HANDLE_T handle)
237 struct vchiq_service *service;
239 spin_lock(&service_spinlock);
240 service = handle_to_service(handle);
242 ((service->srvstate == VCHIQ_SRVSTATE_FREE) ||
243 (service->srvstate == VCHIQ_SRVSTATE_CLOSED)) &&
244 (service->handle == handle) &&
245 (service->instance == instance)) {
246 WARN_ON(service->ref_count == 0);
247 service->ref_count++;
250 spin_unlock(&service_spinlock);
253 vchiq_log_info(vchiq_core_log_level,
254 "Invalid service handle 0x%x", handle);
/* Iterate the service table from a caller-supplied index, returning (with
 * a reference held) the next non-free service belonging to the instance.
 * NOTE(review): as visible here, `service` is still NULL when the WARN_ON
 * and increment run — a `service = srv;` assignment appears to be missing
 * from this view of the file; confirm against upstream. */
259 struct vchiq_service *
260 next_service_by_instance(struct vchiq_state *state, VCHIQ_INSTANCE_T instance,
263 struct vchiq_service *service = NULL;
266 spin_lock(&service_spinlock);
267 while (idx < state->unused_service) {
268 struct vchiq_service *srv = state->services[idx++];
270 if (srv && (srv->srvstate != VCHIQ_SRVSTATE_FREE) &&
271 (srv->instance == instance)) {
273 WARN_ON(service->ref_count == 0);
274 service->ref_count++;
278 spin_unlock(&service_spinlock);
/* Take an additional reference on a service that is already known to be
 * referenced (hence the WARN_ON on a zero count). */
286 lock_service(struct vchiq_service *service)
288 spin_lock(&service_spinlock);
291 WARN_ON(service->ref_count == 0);
292 service->ref_count++;
294 spin_unlock(&service_spinlock);
/* Drop a service reference. When the count reaches zero the service must
 * already be FREE; its table slot is cleared and, outside the spinlock,
 * the user-data destructor (if any) is invoked. */
298 unlock_service(struct vchiq_service *service)
300 spin_lock(&service_spinlock);
302 WARN(1, "%s: service is NULL\n", __func__);
305 if (!service->ref_count) {
306 WARN(1, "%s: ref_count is zero\n", __func__);
309 service->ref_count--;
310 if (!service->ref_count) {
311 struct vchiq_state *state = service->state;
313 WARN_ON(service->srvstate != VCHIQ_SRVSTATE_FREE);
314 state->services[service->localport] = NULL;
319 spin_unlock(&service_spinlock);
/* Call the termination callback after releasing the spinlock, since it
 * may sleep or re-enter service code. */
321 if (service && service->userdata_term)
322 service->userdata_term(service->base.userdata);
/* Return the client_id of the service identified by handle, or 0 if the
 * handle is invalid. Takes and drops a service reference. */
328 vchiq_get_client_id(VCHIQ_SERVICE_HANDLE_T handle)
330 struct vchiq_service *service = find_service_by_handle(handle);
333 id = service ? service->client_id : 0;
335 unlock_service(service);
/* Return the caller-supplied userdata for the handle, or NULL.
 * NOTE(review): uses handle_to_service() without taking a reference. */
341 vchiq_get_service_userdata(VCHIQ_SERVICE_HANDLE_T handle)
343 struct vchiq_service *service = handle_to_service(handle);
345 return service ? service->base.userdata : NULL;
/* Return the service's four-character code, or 0 for an invalid handle. */
349 vchiq_get_service_fourcc(VCHIQ_SERVICE_HANDLE_T handle)
351 struct vchiq_service *service = handle_to_service(handle);
353 return service ? service->base.fourcc : 0;
/* Flag a service as closing and synchronise with threads that may hold
 * the recycle or slot mutexes, then wake any sender blocked on quota.
 * sh_thread is non-zero when called from the slot handler itself. */
357 mark_service_closing_internal(struct vchiq_service *service, int sh_thread)
359 struct vchiq_state *state = service->state;
360 struct vchiq_service_quota *service_quota;
362 service->closing = 1;
364 /* Synchronise with other threads. */
365 mutex_lock(&state->recycle_mutex);
366 mutex_unlock(&state->recycle_mutex);
367 if (!sh_thread || (state->conn_state != VCHIQ_CONNSTATE_PAUSE_SENT)) {
368 /* If we're pausing then the slot_mutex is held until resume
369 * by the slot handler. Therefore don't try to acquire this
370 * mutex if we're the slot handler and in the pause sent state.
371 * We don't need to in this case anyway. */
372 mutex_lock(&state->slot_mutex);
373 mutex_unlock(&state->slot_mutex);
376 /* Unblock any sending thread. */
377 service_quota = &state->service_quotas[service->localport];
378 complete(&service_quota->quota_event);
/* Public wrapper: mark a service closing from a non-slot-handler context. */
382 mark_service_closing(struct vchiq_service *service)
384 mark_service_closing_internal(service, 0);
/* Invoke the service's user callback with the given reason/header/bulk
 * userdata. A VCHIQ_ERROR result from the callback is logged and then
 * downgraded to VCHIQ_SUCCESS so one misbehaving service cannot stall
 * message processing; VCHIQ_RETRY propagates to the caller. */
387 static inline VCHIQ_STATUS_T
388 make_service_callback(struct vchiq_service *service, VCHIQ_REASON_T reason,
389 struct vchiq_header *header, void *bulk_userdata)
391 VCHIQ_STATUS_T status;
393 vchiq_log_trace(vchiq_core_log_level, "%d: callback:%d (%s, %pK, %pK)",
394 service->state->id, service->localport, reason_names[reason],
395 header, bulk_userdata);
396 status = service->base.callback(reason, header, service->handle,
398 if (status == VCHIQ_ERROR) {
399 vchiq_log_warning(vchiq_core_log_level,
400 "%d: ignoring ERROR from callback to service %x",
401 service->state->id, service->handle);
402 status = VCHIQ_SUCCESS;
/* Record a connection state transition, log it, and notify the platform
 * layer of the change. */
408 vchiq_set_conn_state(struct vchiq_state *state, VCHIQ_CONNSTATE_T newstate)
410 VCHIQ_CONNSTATE_T oldstate = state->conn_state;
412 vchiq_log_info(vchiq_core_log_level, "%d: %s->%s", state->id,
413 conn_state_names[oldstate],
414 conn_state_names[newstate]);
415 state->conn_state = newstate;
416 vchiq_platform_conn_state_changed(state, oldstate, newstate);
/* Initialise the local waitqueue backing a cross-VPU remote event. */
420 remote_event_create(wait_queue_head_t *wq, struct remote_event *event)
423 /* Don't clear the 'fired' flag because it may already have been set
424 ** by the other side. */
425 init_waitqueue_head(wq);
/* Block (killably) until the event's 'fired' flag is set by the peer.
 * The error-handling body of the wait is missing from this view. */
429 remote_event_wait(wait_queue_head_t *wq, struct remote_event *event)
434 if (wait_event_killable(*wq, event->fired)) {
/* Signal a remote event from the local side (body not visible here). */
447 remote_event_signal_local(wait_queue_head_t *wq, struct remote_event *event)
/* Deliver a locally-visible wakeup if the event has fired while armed. */
455 remote_event_poll(wait_queue_head_t *wq, struct remote_event *event)
457 if (event->fired && event->armed)
458 remote_event_signal_local(wq, event);
/* Poll all four shared-state events (sync trigger/release, trigger,
 * recycle) — used to catch signals that may have raced arming. */
462 remote_event_pollall(struct vchiq_state *state)
464 remote_event_poll(&state->sync_trigger_event, &state->local->sync_trigger);
465 remote_event_poll(&state->sync_release_event, &state->local->sync_release);
466 remote_event_poll(&state->trigger_event, &state->local->trigger);
467 remote_event_poll(&state->recycle_event, &state->local->recycle);
470 /* Round up message sizes so that any space at the end of a slot is always big
471 ** enough for a header. This relies on header size being a power of two, which
472 ** has been verified earlier by a static assertion. */
475 calc_stride(size_t size)
477 /* Allow room for the header */
478 size += sizeof(struct vchiq_header);
/* Round up to the next multiple of sizeof(struct vchiq_header). */
481 return (size + sizeof(struct vchiq_header) - 1) &
482 ~(sizeof(struct vchiq_header) - 1);
485 /* Called by the slot handler thread */
/* Find a service advertising `fourcc` that can accept an OPEN: either
 * LISTENING, or OPEN but not yet bound to a remote port. Returns it with
 * a reference held (return statements not visible in this view). */
486 static struct vchiq_service *
487 get_listening_service(struct vchiq_state *state, int fourcc)
491 WARN_ON(fourcc == VCHIQ_FOURCC_INVALID);
493 for (i = 0; i < state->unused_service; i++) {
494 struct vchiq_service *service = state->services[i];
497 (service->public_fourcc == fourcc) &&
498 ((service->srvstate == VCHIQ_SRVSTATE_LISTENING) ||
499 ((service->srvstate == VCHIQ_SRVSTATE_OPEN) &&
500 (service->remoteport == VCHIQ_PORT_FREE)))) {
501 lock_service(service);
509 /* Called by the slot handler thread */
/* Find the OPEN service currently connected to the given remote port,
 * returning it with a reference held. */
510 static struct vchiq_service *
511 get_connected_service(struct vchiq_state *state, unsigned int port)
515 for (i = 0; i < state->unused_service; i++) {
516 struct vchiq_service *service = state->services[i];
518 if (service && (service->srvstate == VCHIQ_SRVSTATE_OPEN)
519 && (service->remoteport == port)) {
520 lock_service(service);
/* Atomically record a poll request for a service (per-service poll_flags
 * plus the per-32-service group bitmap), mark the state as needing a poll
 * pass, and kick the slot handler. Uses lock-free cmpxchg retry loops so
 * it is safe from any context. */
528 request_poll(struct vchiq_state *state, struct vchiq_service *service,
535 value = atomic_read(&service->poll_flags);
536 } while (atomic_cmpxchg(&service->poll_flags, value,
537 value | (1 << poll_type)) != value);
/* localport>>5 selects the 32-bit group; localport&0x1f the bit within. */
540 value = atomic_read(&state->poll_services[
541 service->localport>>5]);
542 } while (atomic_cmpxchg(
543 &state->poll_services[service->localport>>5],
544 value, value | (1 << (service->localport & 0x1f)))
548 state->poll_needed = 1;
551 /* ... and ensure the slot handler runs. */
552 remote_event_signal_local(&state->trigger_event, &state->local->trigger);
555 /* Called from queue_message, by the slot handler and application threads,
556 ** with slot_mutex held */
/* Reserve `space` bytes in the transmit stream, padding out the current
 * slot if the message does not fit, and acquiring a fresh slot when the
 * position crosses a slot boundary. Returns a header pointer into
 * tx_data, or NULL if interrupted / no slot available. */
557 static struct vchiq_header *
558 reserve_space(struct vchiq_state *state, size_t space, int is_blocking)
560 struct vchiq_shared_state *local = state->local;
561 int tx_pos = state->local_tx_pos;
562 int slot_space = VCHIQ_SLOT_SIZE - (tx_pos & VCHIQ_SLOT_MASK);
564 if (space > slot_space) {
565 struct vchiq_header *header;
566 /* Fill the remaining space with padding */
567 WARN_ON(state->tx_data == NULL);
568 header = (struct vchiq_header *)
569 (state->tx_data + (tx_pos & VCHIQ_SLOT_MASK));
570 header->msgid = VCHIQ_MSGID_PADDING;
571 header->size = slot_space - sizeof(struct vchiq_header);
573 tx_pos += slot_space;
576 /* If necessary, get the next slot. */
577 if ((tx_pos & VCHIQ_SLOT_MASK) == 0) {
580 /* If there is no free slot... */
582 if (!try_wait_for_completion(&state->slot_available_event)) {
583 /* ...wait for one. */
585 VCHIQ_STATS_INC(state, slot_stalls);
587 /* But first, flush through the last slot. */
588 state->local_tx_pos = tx_pos;
589 local->tx_pos = tx_pos;
590 remote_event_signal(&state->remote->trigger);
593 (wait_for_completion_killable(
594 &state->slot_available_event)))
595 return NULL; /* No space available */
/* Sanity check: the completion fired but no slot is queued; re-post
 * the completion and warn rather than run off the end. */
598 if (tx_pos == (state->slot_queue_available * VCHIQ_SLOT_SIZE)) {
599 complete(&state->slot_available_event);
600 pr_warn("%s: invalid tx_pos: %d\n", __func__, tx_pos);
604 slot_index = local->slot_queue[
605 SLOT_QUEUE_INDEX_FROM_POS(tx_pos) &
606 VCHIQ_SLOT_QUEUE_MASK];
608 (char *)SLOT_DATA_FROM_INDEX(state, slot_index);
611 state->local_tx_pos = tx_pos + space;
613 return (struct vchiq_header *)(state->tx_data +
614 (tx_pos & VCHIQ_SLOT_MASK));
617 /* Called by the recycle thread. */
/* Walk the slots the peer has returned on the recycle queue, decrement the
 * per-service message/slot use counts for every DATA message they contain,
 * signal services that drop back under quota, and hand the slots back to
 * the available queue.
 * NOTE(review): the token `"a_spinlock` throughout this function is a
 * mis-decoded `&quota_spinlock` (HTML entity `&quot;` collapsed to `"`);
 * quota_spinlock is defined above — restore before building. */
619 process_free_queue(struct vchiq_state *state, BITSET_T *service_found,
622 struct vchiq_shared_state *local = state->local;
623 int slot_queue_available;
625 /* Find slots which have been freed by the other side, and return them
626 ** to the available queue. */
627 slot_queue_available = state->slot_queue_available;
630 * Use a memory barrier to ensure that any state that may have been
631 * modified by another thread is not masked by stale prefetched
636 while (slot_queue_available != local->slot_queue_recycle) {
638 int slot_index = local->slot_queue[slot_queue_available++ &
639 VCHIQ_SLOT_QUEUE_MASK];
640 char *data = (char *)SLOT_DATA_FROM_INDEX(state, slot_index);
644 * Beware of the address dependency - data is calculated
645 * using an index written by the other side.
649 vchiq_log_trace(vchiq_core_log_level, "%d: pfq %d=%pK %x %x",
650 state->id, slot_index, data,
651 local->slot_queue_recycle, slot_queue_available);
653 /* Initialise the bitmask for services which have used this
655 memset(service_found, 0, length);
/* Scan every message header packed into this slot. */
659 while (pos < VCHIQ_SLOT_SIZE) {
660 struct vchiq_header *header =
661 (struct vchiq_header *)(data + pos);
662 int msgid = header->msgid;
664 if (VCHIQ_MSG_TYPE(msgid) == VCHIQ_MSG_DATA) {
665 int port = VCHIQ_MSG_SRCPORT(msgid);
666 struct vchiq_service_quota *service_quota =
667 &state->service_quotas[port];
670 spin_lock("a_spinlock);
671 count = service_quota->message_use_count;
673 service_quota->message_use_count =
675 spin_unlock("a_spinlock);
677 if (count == service_quota->message_quota)
678 /* Signal the service that it
679 ** has dropped below its quota
681 complete(&service_quota->quota_event);
682 else if (count == 0) {
683 vchiq_log_error(vchiq_core_log_level,
684 "service %d message_use_count=%d (header %pK, msgid %x, header->msgid %x, header->size %x)",
686 service_quota->message_use_count,
687 header, msgid, header->msgid,
689 WARN(1, "invalid message use count\n");
/* Only charge the slot once per service per slot. */
691 if (!BITSET_IS_SET(service_found, port)) {
692 /* Set the found bit for this service */
693 BITSET_SET(service_found, port);
695 spin_lock("a_spinlock);
696 count = service_quota->slot_use_count;
698 service_quota->slot_use_count =
700 spin_unlock("a_spinlock);
703 /* Signal the service in case
704 ** it has dropped below its
706 complete(&service_quota->quota_event);
708 vchiq_core_log_level,
709 "%d: pfq:%d %x@%pK - slot_use->%d",
711 header->size, header,
715 vchiq_core_log_level,
716 "service %d slot_use_count=%d (header %pK, msgid %x, header->msgid %x, header->size %x)",
718 msgid, header->msgid,
720 WARN(1, "bad slot use count\n");
727 pos += calc_stride(header->size);
728 if (pos > VCHIQ_SLOT_SIZE) {
729 vchiq_log_error(vchiq_core_log_level,
730 "pfq - pos %x: header %pK, msgid %x, header->msgid %x, header->size %x",
731 pos, header, msgid, header->msgid,
733 WARN(1, "invalid slot position\n");
/* Account for DATA payload space reclaimed from this slot and wake a
 * sender blocked on the global data quota if we just crossed it. */
740 spin_lock("a_spinlock);
741 count = state->data_use_count;
743 state->data_use_count =
745 spin_unlock("a_spinlock);
746 if (count == state->data_quota)
747 complete(&state->data_quota_event);
751 * Don't allow the slot to be reused until we are no
752 * longer interested in it.
756 state->slot_queue_available = slot_queue_available;
757 complete(&state->slot_available_event);
/* Default copy callback used by queue_message(): a plain memcpy from the
 * caller's context buffer into the slot at the given offset. */
762 memcpy_copy_callback(
763 void *context, void *dest,
764 size_t offset, size_t maxsize)
766 memcpy(dest + offset, context + offset, maxsize);
/* Drive a copy callback repeatedly until `size` bytes have been copied
 * into `dest`, propagating negative callback results as errors. (The
 * function header and loop framing are missing from this view.) */
772 ssize_t (*copy_callback)(void *context, void *dest,
773 size_t offset, size_t maxsize),
781 ssize_t callback_result;
782 size_t max_bytes = size - pos;
785 copy_callback(context, dest + pos,
788 if (callback_result < 0)
789 return callback_result;
/* A zero result means the callback has no more data to supply. */
791 if (!callback_result)
794 if (callback_result > max_bytes)
797 pos += callback_result;
803 /* Called by the slot handler and application threads */
/* Queue a message into the transmit slots. For DATA messages this enforces
 * both the global data quota and the per-service message/slot quotas
 * (blocking killably until space frees up), copies the payload via the
 * supplied callback, updates quota accounting, then publishes the header
 * and signals the peer. The `flags` bits (QMFLAGS_*) control blocking and
 * whether slot_mutex is taken/released inside this function.
 * NOTE(review): the token `"a_spinlock` below is a mis-decoded
 * `&quota_spinlock` (HTML entity `&quot;` collapsed to `"`). */
804 static VCHIQ_STATUS_T
805 queue_message(struct vchiq_state *state, struct vchiq_service *service,
807 ssize_t (*copy_callback)(void *context, void *dest,
808 size_t offset, size_t maxsize),
809 void *context, size_t size, int flags)
811 struct vchiq_shared_state *local;
812 struct vchiq_service_quota *service_quota = NULL;
813 struct vchiq_header *header;
814 int type = VCHIQ_MSG_TYPE(msgid);
818 local = state->local;
820 stride = calc_stride(size);
822 WARN_ON(!(stride <= VCHIQ_SLOT_SIZE));
824 if (!(flags & QMFLAGS_NO_MUTEX_LOCK) &&
825 (mutex_lock_killable(&state->slot_mutex) != 0))
828 if (type == VCHIQ_MSG_DATA) {
832 WARN(1, "%s: service is NULL\n", __func__);
833 mutex_unlock(&state->slot_mutex);
/* DATA messages may not suppress the mutex handling. */
837 WARN_ON((flags & (QMFLAGS_NO_MUTEX_LOCK |
838 QMFLAGS_NO_MUTEX_UNLOCK)) != 0);
840 if (service->closing) {
841 /* The service has been closed */
842 mutex_unlock(&state->slot_mutex);
846 service_quota = &state->service_quotas[service->localport];
848 spin_lock("a_spinlock);
850 /* Ensure this service doesn't use more than its quota of
851 ** messages or slots */
852 tx_end_index = SLOT_QUEUE_INDEX_FROM_POS(
853 state->local_tx_pos + stride - 1);
855 /* Ensure data messages don't use more than their quota of
857 while ((tx_end_index != state->previous_data_index) &&
858 (state->data_use_count == state->data_quota)) {
859 VCHIQ_STATS_INC(state, data_stalls);
860 spin_unlock("a_spinlock);
861 mutex_unlock(&state->slot_mutex);
863 if (wait_for_completion_killable(
864 &state->data_quota_event))
867 mutex_lock(&state->slot_mutex);
868 spin_lock("a_spinlock);
869 tx_end_index = SLOT_QUEUE_INDEX_FROM_POS(
870 state->local_tx_pos + stride - 1);
871 if ((tx_end_index == state->previous_data_index) ||
872 (state->data_use_count < state->data_quota)) {
873 /* Pass the signal on to other waiters */
874 complete(&state->data_quota_event);
/* Per-service quota stall loop: block until either the message count or
 * the slot count drops back under the service's limits. */
879 while ((service_quota->message_use_count ==
880 service_quota->message_quota) ||
881 ((tx_end_index != service_quota->previous_tx_index) &&
882 (service_quota->slot_use_count ==
883 service_quota->slot_quota))) {
884 spin_unlock("a_spinlock);
885 vchiq_log_trace(vchiq_core_log_level,
886 "%d: qm:%d %s,%zx - quota stall "
888 state->id, service->localport,
889 msg_type_str(type), size,
890 service_quota->message_use_count,
891 service_quota->slot_use_count);
892 VCHIQ_SERVICE_STATS_INC(service, quota_stalls);
893 mutex_unlock(&state->slot_mutex);
894 if (wait_for_completion_killable(
895 &service_quota->quota_event))
897 if (service->closing)
899 if (mutex_lock_killable(&state->slot_mutex) != 0)
901 if (service->srvstate != VCHIQ_SRVSTATE_OPEN) {
902 /* The service has been closed */
903 mutex_unlock(&state->slot_mutex);
906 spin_lock("a_spinlock);
907 tx_end_index = SLOT_QUEUE_INDEX_FROM_POS(
908 state->local_tx_pos + stride - 1);
911 spin_unlock("a_spinlock);
914 header = reserve_space(state, stride, flags & QMFLAGS_IS_BLOCKING);
918 VCHIQ_SERVICE_STATS_INC(service, slot_stalls);
919 /* In the event of a failure, return the mutex to the
921 if (!(flags & QMFLAGS_NO_MUTEX_LOCK))
922 mutex_unlock(&state->slot_mutex);
926 if (type == VCHIQ_MSG_DATA) {
927 ssize_t callback_result;
931 vchiq_log_info(vchiq_core_log_level,
932 "%d: qm %s@%pK,%zx (%d->%d)",
933 state->id, msg_type_str(VCHIQ_MSG_TYPE(msgid)),
934 header, size, VCHIQ_MSG_SRCPORT(msgid),
935 VCHIQ_MSG_DSTPORT(msgid));
937 WARN_ON((flags & (QMFLAGS_NO_MUTEX_LOCK |
938 QMFLAGS_NO_MUTEX_UNLOCK)) != 0);
941 copy_message_data(copy_callback, context,
944 if (callback_result < 0) {
945 mutex_unlock(&state->slot_mutex);
946 VCHIQ_SERVICE_STATS_INC(service,
951 if (SRVTRACE_ENABLED(service,
953 vchiq_log_dump_mem("Sent", 0,
956 (size_t)callback_result));
/* Charge this message against the service and global quotas. */
958 spin_lock("a_spinlock);
959 service_quota->message_use_count++;
962 SLOT_QUEUE_INDEX_FROM_POS(state->local_tx_pos - 1);
964 /* If this transmission can't fit in the last slot used by any
965 ** service, the data_use_count must be increased. */
966 if (tx_end_index != state->previous_data_index) {
967 state->previous_data_index = tx_end_index;
968 state->data_use_count++;
971 /* If this isn't the same slot last used by this service,
972 ** the service's slot_use_count must be increased. */
973 if (tx_end_index != service_quota->previous_tx_index) {
974 service_quota->previous_tx_index = tx_end_index;
975 slot_use_count = ++service_quota->slot_use_count;
980 spin_unlock("a_spinlock);
983 vchiq_log_trace(vchiq_core_log_level,
984 "%d: qm:%d %s,%zx - slot_use->%d (hdr %p)",
985 state->id, service->localport,
986 msg_type_str(VCHIQ_MSG_TYPE(msgid)), size,
987 slot_use_count, header);
989 VCHIQ_SERVICE_STATS_INC(service, ctrl_tx_count);
990 VCHIQ_SERVICE_STATS_ADD(service, ctrl_tx_bytes, size);
992 vchiq_log_info(vchiq_core_log_level,
993 "%d: qm %s@%pK,%zx (%d->%d)", state->id,
994 msg_type_str(VCHIQ_MSG_TYPE(msgid)),
995 header, size, VCHIQ_MSG_SRCPORT(msgid),
996 VCHIQ_MSG_DSTPORT(msgid));
998 /* It is assumed for now that this code path
999 * only happens from calls inside this file.
1001 * External callers are through the vchiq_queue_message
1002 * path which always sets the type to be VCHIQ_MSG_DATA
1004 * At first glance this appears to be correct but
1005 * more review is needed.
1007 copy_message_data(copy_callback, context,
1008 header->data, size);
1010 VCHIQ_STATS_INC(state, ctrl_tx_count);
/* Publish the header: size first, then msgid, so the peer never sees a
 * valid msgid with a stale size. */
1013 header->msgid = msgid;
1014 header->size = size;
1019 svc_fourcc = service
1020 ? service->base.fourcc
1021 : VCHIQ_MAKE_FOURCC('?', '?', '?', '?');
1023 vchiq_log_info(SRVTRACE_LEVEL(service),
1024 "Sent Msg %s(%u) to %c%c%c%c s:%u d:%d len:%zu",
1025 msg_type_str(VCHIQ_MSG_TYPE(msgid)),
1026 VCHIQ_MSG_TYPE(msgid),
1027 VCHIQ_FOURCC_AS_4CHARS(svc_fourcc),
1028 VCHIQ_MSG_SRCPORT(msgid),
1029 VCHIQ_MSG_DSTPORT(msgid),
1033 /* Make sure the new header is visible to the peer. */
1036 /* Make the new tx_pos visible to the peer. */
1037 local->tx_pos = state->local_tx_pos;
1040 if (service && (type == VCHIQ_MSG_CLOSE))
1041 vchiq_set_service_state(service, VCHIQ_SRVSTATE_CLOSESENT);
1043 if (!(flags & QMFLAGS_NO_MUTEX_UNLOCK))
1044 mutex_unlock(&state->slot_mutex);
1046 remote_event_signal(&state->remote->trigger);
1048 return VCHIQ_SUCCESS;
1051 /* Called by the slot handler and application threads */
/* Queue a message on the synchronous (single-slot) channel: serialise on
 * sync_mutex, wait for the peer to release the sync slot, copy the payload
 * in, publish the header, and signal the peer's sync trigger. The mutex is
 * deliberately kept held across a PAUSE message. */
1052 static VCHIQ_STATUS_T
1053 queue_message_sync(struct vchiq_state *state, struct vchiq_service *service,
1055 ssize_t (*copy_callback)(void *context, void *dest,
1056 size_t offset, size_t maxsize),
1057 void *context, int size, int is_blocking)
1059 struct vchiq_shared_state *local;
1060 struct vchiq_header *header;
1061 ssize_t callback_result;
1063 local = state->local;
1065 if ((VCHIQ_MSG_TYPE(msgid) != VCHIQ_MSG_RESUME) &&
1066 (mutex_lock_killable(&state->sync_mutex) != 0))
1069 remote_event_wait(&state->sync_release_event, &local->sync_release);
1073 header = (struct vchiq_header *)SLOT_DATA_FROM_INDEX(state,
1077 int oldmsgid = header->msgid;
1079 if (oldmsgid != VCHIQ_MSGID_PADDING)
1080 vchiq_log_error(vchiq_core_log_level,
1081 "%d: qms - msgid %x, not PADDING",
1082 state->id, oldmsgid);
1085 vchiq_log_info(vchiq_sync_log_level,
1086 "%d: qms %s@%pK,%x (%d->%d)", state->id,
1087 msg_type_str(VCHIQ_MSG_TYPE(msgid)),
1088 header, size, VCHIQ_MSG_SRCPORT(msgid),
1089 VCHIQ_MSG_DSTPORT(msgid));
1092 copy_message_data(copy_callback, context,
1093 header->data, size);
1095 if (callback_result < 0) {
/* NOTE(review): this error path unlocks slot_mutex, but this function
 * acquires sync_mutex (above) — looks like the wrong mutex; confirm
 * against upstream, where this was later fixed. */
1096 mutex_unlock(&state->slot_mutex);
1097 VCHIQ_SERVICE_STATS_INC(service,
1103 if (SRVTRACE_ENABLED(service,
1105 vchiq_log_dump_mem("Sent", 0,
1108 (size_t)callback_result));
1110 VCHIQ_SERVICE_STATS_INC(service, ctrl_tx_count);
1111 VCHIQ_SERVICE_STATS_ADD(service, ctrl_tx_bytes, size);
1113 VCHIQ_STATS_INC(state, ctrl_tx_count);
/* Publish size before msgid so the peer never reads a torn header. */
1116 header->size = size;
1117 header->msgid = msgid;
1119 if (vchiq_sync_log_level >= VCHIQ_LOG_TRACE) {
1122 svc_fourcc = service
1123 ? service->base.fourcc
1124 : VCHIQ_MAKE_FOURCC('?', '?', '?', '?');
1126 vchiq_log_trace(vchiq_sync_log_level,
1127 "Sent Sync Msg %s(%u) to %c%c%c%c s:%u d:%d len:%d",
1128 msg_type_str(VCHIQ_MSG_TYPE(msgid)),
1129 VCHIQ_MSG_TYPE(msgid),
1130 VCHIQ_FOURCC_AS_4CHARS(svc_fourcc),
1131 VCHIQ_MSG_SRCPORT(msgid),
1132 VCHIQ_MSG_DSTPORT(msgid),
1136 remote_event_signal(&state->remote->sync_trigger);
1138 if (VCHIQ_MSG_TYPE(msgid) != VCHIQ_MSG_PAUSE)
1139 mutex_unlock(&state->sync_mutex);
1141 return VCHIQ_SUCCESS;
/* Bump a slot's use count while a message in it is in use (body not
 * visible in this view). */
1145 claim_slot(struct vchiq_slot_info *slot)
/* Release one claim on a received slot. When a header is supplied, its
 * CLAIMED flag is cleared (skipping already-unclaimed or closing-service
 * cases). When release_count catches up with use_count the slot index is
 * pushed onto the peer's recycle queue and the peer is signalled. */
1151 release_slot(struct vchiq_state *state, struct vchiq_slot_info *slot_info,
1152 struct vchiq_header *header, struct vchiq_service *service)
1156 mutex_lock(&state->recycle_mutex);
1159 int msgid = header->msgid;
1161 if (((msgid & VCHIQ_MSGID_CLAIMED) == 0) ||
1162 (service && service->closing)) {
1163 mutex_unlock(&state->recycle_mutex);
1167 /* Rewrite the message header to prevent a double
1169 header->msgid = msgid & ~VCHIQ_MSGID_CLAIMED;
1172 release_count = slot_info->release_count;
1173 slot_info->release_count = ++release_count;
1175 if (release_count == slot_info->use_count) {
1176 int slot_queue_recycle;
1177 /* Add to the freed queue */
1179 /* A read barrier is necessary here to prevent speculative
1180 ** fetches of remote->slot_queue_recycle from overtaking the
1184 slot_queue_recycle = state->remote->slot_queue_recycle;
1185 state->remote->slot_queue[slot_queue_recycle &
1186 VCHIQ_SLOT_QUEUE_MASK] =
1187 SLOT_INDEX_FROM_INFO(state, slot_info);
1188 state->remote->slot_queue_recycle = slot_queue_recycle + 1;
1189 vchiq_log_info(vchiq_core_log_level,
1190 "%d: %s %d - recycle->%x", state->id, __func__,
1191 SLOT_INDEX_FROM_INFO(state, slot_info),
1192 state->remote->slot_queue_recycle);
1194 /* A write barrier is necessary, but remote_event_signal
1196 remote_event_signal(&state->remote->recycle);
1199 mutex_unlock(&state->recycle_mutex);
1202 /* Called by the slot handler - don't hold the bulk mutex */
/* Deliver completion notifications for bulks between queue->remove and
 * the snapshot of queue->process: update stats, wake BLOCKING waiters via
 * bulk_waiter_spinlock, or fire CALLBACK-mode user callbacks. If a user
 * callback asks for a retry, a TX/RX notify poll is requested so the slot
 * handler revisits this queue later. */
1203 static VCHIQ_STATUS_T
1204 notify_bulks(struct vchiq_service *service, struct vchiq_bulk_queue *queue,
1207 VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
1209 vchiq_log_trace(vchiq_core_log_level,
1210 "%d: nb:%d %cx - p=%x rn=%x r=%x",
1211 service->state->id, service->localport,
1212 (queue == &service->bulk_tx) ? 't' : 'r',
1213 queue->process, queue->remote_notify, queue->remove);
1215 queue->remote_notify = queue->process;
1217 if (status == VCHIQ_SUCCESS) {
1218 while (queue->remove != queue->remote_notify) {
1219 struct vchiq_bulk *bulk =
1220 &queue->bulks[BULK_INDEX(queue->remove)];
1222 /* Only generate callbacks for non-dummy bulk
1223 ** requests, and non-terminated services */
1224 if (bulk->data && service->instance) {
1225 if (bulk->actual != VCHIQ_BULK_ACTUAL_ABORTED) {
1226 if (bulk->dir == VCHIQ_BULK_TRANSMIT) {
1227 VCHIQ_SERVICE_STATS_INC(service,
1229 VCHIQ_SERVICE_STATS_ADD(service,
1233 VCHIQ_SERVICE_STATS_INC(service,
1235 VCHIQ_SERVICE_STATS_ADD(service,
1240 VCHIQ_SERVICE_STATS_INC(service,
1241 bulk_aborted_count);
1243 if (bulk->mode == VCHIQ_BULK_MODE_BLOCKING) {
1244 struct bulk_waiter *waiter;
1246 spin_lock(&bulk_waiter_spinlock);
1247 waiter = bulk->userdata;
1249 waiter->actual = bulk->actual;
1250 complete(&waiter->event);
1252 spin_unlock(&bulk_waiter_spinlock);
1253 } else if (bulk->mode ==
1254 VCHIQ_BULK_MODE_CALLBACK) {
/* Choose the reason from direction x (aborted vs done). */
1255 VCHIQ_REASON_T reason = (bulk->dir ==
1256 VCHIQ_BULK_TRANSMIT) ?
1258 VCHIQ_BULK_ACTUAL_ABORTED) ?
1259 VCHIQ_BULK_TRANSMIT_ABORTED :
1260 VCHIQ_BULK_TRANSMIT_DONE) :
1262 VCHIQ_BULK_ACTUAL_ABORTED) ?
1263 VCHIQ_BULK_RECEIVE_ABORTED :
1264 VCHIQ_BULK_RECEIVE_DONE);
1265 status = make_service_callback(service,
1266 reason, NULL, bulk->userdata);
1267 if (status == VCHIQ_RETRY)
1273 complete(&service->bulk_remove_event);
1276 status = VCHIQ_SUCCESS;
1279 if (status == VCHIQ_RETRY)
1280 request_poll(service->state, service,
1281 (queue == &service->bulk_tx) ?
1282 VCHIQ_POLL_TXNOTIFY : VCHIQ_POLL_RXNOTIFY);
1287 /* Called by the slot handler thread */
/* Consume the poll bitmaps set by request_poll(): for each flagged
 * service, atomically clear its poll_flags and act on them — close or
 * terminate the service (re-requesting the poll on failure) and deliver
 * pending TX/RX bulk notifications. Drops the service reference taken by
 * find_service_by_port() when done. */
1289 poll_services(struct vchiq_state *state)
1293 for (group = 0; group < BITSET_SIZE(state->unused_service); group++) {
1296 flags = atomic_xchg(&state->poll_services[group], 0);
1297 for (i = 0; flags; i++) {
1298 if (flags & (1 << i)) {
1299 struct vchiq_service *service =
1300 find_service_by_port(state,
1308 atomic_xchg(&service->poll_flags, 0);
1310 (1 << VCHIQ_POLL_REMOVE)) {
1311 vchiq_log_info(vchiq_core_log_level,
1312 "%d: ps - remove %d<->%d",
1313 state->id, service->localport,
1314 service->remoteport);
1316 /* Make it look like a client, because
1317 it must be removed and not left in
1318 the LISTENING state. */
1319 service->public_fourcc =
1320 VCHIQ_FOURCC_INVALID;
1322 if (vchiq_close_service_internal(
1323 service, 0/*!close_recvd*/) !=
1325 request_poll(state, service,
1327 } else if (service_flags &
1328 (1 << VCHIQ_POLL_TERMINATE)) {
1329 vchiq_log_info(vchiq_core_log_level,
1330 "%d: ps - terminate %d<->%d",
1331 state->id, service->localport,
1332 service->remoteport);
1333 if (vchiq_close_service_internal(
1334 service, 0/*!close_recvd*/) !=
1336 request_poll(state, service,
1337 VCHIQ_POLL_TERMINATE);
1339 if (service_flags & (1 << VCHIQ_POLL_TXNOTIFY))
1340 notify_bulks(service,
1343 if (service_flags & (1 << VCHIQ_POLL_RXNOTIFY))
1344 notify_bulks(service,
1347 unlock_service(service);
1353 /* Called with the bulk_mutex held */
/*
 * abort_outstanding_bulks - drain a service's bulk queue on close/abort.
 *
 * Walks queue->process forward until it catches up with both local_insert
 * and remote_insert. Where one side has no matching entry, a dummy bulk is
 * fabricated (NULL remote data, or actual = VCHIQ_BULK_ACTUAL_ABORTED) so
 * the two insert indices stay paired. Completed-but-unprocessed bulks are
 * finished via vchiq_complete_bulk().
 *
 * NOTE(review): interior lines are elided in this excerpt; the loop
 * structure shown here is partial.
 */
1355 abort_outstanding_bulks(struct vchiq_service *service,
1356 struct vchiq_bulk_queue *queue)
1358 int is_tx = (queue == &service->bulk_tx);
1360 vchiq_log_trace(vchiq_core_log_level,
1361 "%d: aob:%d %cx - li=%x ri=%x p=%x",
1362 service->state->id, service->localport, is_tx ? 't' : 'r',
1363 queue->local_insert, queue->remote_insert, queue->process);
/* The insert indices must never lag behind the process index. */
1365 WARN_ON(!((int)(queue->local_insert - queue->process) >= 0));
1366 WARN_ON(!((int)(queue->remote_insert - queue->process) >= 0));
1368 while ((queue->process != queue->local_insert) ||
1369 (queue->process != queue->remote_insert)) {
1370 struct vchiq_bulk *bulk =
1371 &queue->bulks[BULK_INDEX(queue->process)];
1373 if (queue->process == queue->remote_insert) {
1374 /* fabricate a matching dummy bulk */
1375 bulk->remote_data = NULL;
1376 bulk->remote_size = 0;
1377 queue->remote_insert++;
1380 if (queue->process != queue->local_insert) {
1381 vchiq_complete_bulk(bulk);
1383 vchiq_log_info(SRVTRACE_LEVEL(service),
1384 "%s %c%c%c%c d:%d ABORTED - tx len:%d, "
1386 is_tx ? "Send Bulk to" : "Recv Bulk from",
1387 VCHIQ_FOURCC_AS_4CHARS(service->base.fourcc),
1388 service->remoteport,
1392 /* fabricate a matching dummy bulk */
1395 bulk->actual = VCHIQ_BULK_ACTUAL_ABORTED;
1396 bulk->dir = is_tx ? VCHIQ_BULK_TRANSMIT :
1398 queue->local_insert++;
/*
 * parse_open - handle an incoming OPEN request from the remote side.
 *
 * Validates the payload size, looks up a listening service for the
 * requested fourcc, negotiates versions, acknowledges with OPENACK
 * (synchronous or queued depending on service->sync) and moves the
 * service to OPEN/OPENSYNC. If no service matches, or the request is
 * invalid, a CLOSE is sent back. The bail_not_ready path (not fully
 * visible here) defers the message when queues are full.
 *
 * NOTE(review): return type and several interior lines are elided in
 * this excerpt; callers in parse_rx_slots treat a zero return as
 * "not ready".
 */
1406 parse_open(struct vchiq_state *state, struct vchiq_header *header)
1408 struct vchiq_service *service = NULL;
1410 unsigned int localport, remoteport;
1412 msgid = header->msgid;
1413 size = header->size;
1414 localport = VCHIQ_MSG_DSTPORT(msgid);
1415 remoteport = VCHIQ_MSG_SRCPORT(msgid);
/* Only messages large enough to carry an open payload are considered. */
1416 if (size >= sizeof(struct vchiq_open_payload)) {
1417 const struct vchiq_open_payload *payload =
1418 (struct vchiq_open_payload *)header->data;
1419 unsigned int fourcc;
1421 fourcc = payload->fourcc;
1422 vchiq_log_info(vchiq_core_log_level,
1423 "%d: prs OPEN@%pK (%d->'%c%c%c%c')",
1424 state->id, header, localport,
1425 VCHIQ_FOURCC_AS_4CHARS(fourcc));
1427 service = get_listening_service(state, fourcc);
1430 /* A matching service exists */
1431 short version = payload->version;
1432 short version_min = payload->version_min;
/* Reject if the version windows of the two ends do not overlap. */
1434 if ((service->version < version_min) ||
1435 (version < service->version_min)) {
1436 /* Version mismatch */
1437 vchiq_loud_error_header();
1438 vchiq_loud_error("%d: service %d (%c%c%c%c) "
1439 "version mismatch - local (%d, min %d)"
1440 " vs. remote (%d, min %d)",
1441 state->id, service->localport,
1442 VCHIQ_FOURCC_AS_4CHARS(fourcc),
1443 service->version, service->version_min,
1444 version, version_min);
1445 vchiq_loud_error_footer();
1446 unlock_service(service);
1450 service->peer_version = version;
1452 if (service->srvstate == VCHIQ_SRVSTATE_LISTENING) {
1453 struct vchiq_openack_payload ack_payload = {
/* Synchronous mode requires a common version that supports it. */
1457 if (state->version_common <
1458 VCHIQ_VERSION_SYNCHRONOUS_MODE)
1461 /* Acknowledge the OPEN */
1462 if (service->sync) {
1463 if (queue_message_sync(
1470 memcpy_copy_callback,
1472 sizeof(ack_payload),
1474 goto bail_not_ready;
1476 if (queue_message(state,
1482 memcpy_copy_callback,
1484 sizeof(ack_payload),
1486 goto bail_not_ready;
1489 /* The service is now open */
1490 vchiq_set_service_state(service,
1491 service->sync ? VCHIQ_SRVSTATE_OPENSYNC
1492 : VCHIQ_SRVSTATE_OPEN);
1495 service->remoteport = remoteport;
/* Second word of the payload carries the client id. */
1496 service->client_id = ((int *)header->data)[1];
1497 if (make_service_callback(service, VCHIQ_SERVICE_OPENED,
1498 NULL, NULL) == VCHIQ_RETRY) {
1499 /* Bail out if not ready */
1500 service->remoteport = VCHIQ_PORT_FREE;
1501 goto bail_not_ready;
1504 /* Success - the message has been dealt with */
1505 unlock_service(service);
1511 /* No available service, or an invalid request - send a CLOSE */
1512 if (queue_message(state, NULL,
1513 VCHIQ_MAKE_MSG(VCHIQ_MSG_CLOSE, 0, VCHIQ_MSG_SRCPORT(msgid)),
1514 NULL, NULL, 0, 0) == VCHIQ_RETRY)
1515 goto bail_not_ready;
1521 unlock_service(service);
1526 /* Called by the slot handler thread */
/*
 * parse_rx_slots - main receive-path parser.
 *
 * Consumes messages from the remote side's transmit slots until
 * state->rx_pos catches up with the remote tx_pos snapshot. For each
 * message it decodes the msgid into type/src/dst ports, resolves the
 * target service where applicable, and dispatches on the message type
 * (OPEN, OPENACK, CLOSE, DATA, CONNECT, BULK_*_DONE, PADDING,
 * PAUSE/RESUME, REMOTE_*). "goto bail_not_ready" defers processing and
 * leaves rx_pos unchanged so the message is re-parsed later.
 *
 * NOTE(review): many interior lines (switch statement heads, break
 * statements, some error branches) are elided in this excerpt.
 */
1528 parse_rx_slots(struct vchiq_state *state)
1530 struct vchiq_shared_state *remote = state->remote;
1531 struct vchiq_service *service = NULL;
1534 DEBUG_INITIALISE(state->local)
1536 tx_pos = remote->tx_pos;
1538 while (state->rx_pos != tx_pos) {
1539 struct vchiq_header *header;
1542 unsigned int localport, remoteport;
1544 DEBUG_TRACE(PARSE_LINE);
/* Crossing into a new slot: map it and take a holding reference. */
1545 if (!state->rx_data) {
1548 WARN_ON(!((state->rx_pos & VCHIQ_SLOT_MASK) == 0));
1549 rx_index = remote->slot_queue[
1550 SLOT_QUEUE_INDEX_FROM_POS(state->rx_pos) &
1551 VCHIQ_SLOT_QUEUE_MASK];
1552 state->rx_data = (char *)SLOT_DATA_FROM_INDEX(state,
1554 state->rx_info = SLOT_INFO_FROM_INDEX(state, rx_index);
1556 /* Initialise use_count to one, and increment
1557 ** release_count at the end of the slot to avoid
1558 ** releasing the slot prematurely. */
1559 state->rx_info->use_count = 1;
1560 state->rx_info->release_count = 0;
1563 header = (struct vchiq_header *)(state->rx_data +
1564 (state->rx_pos & VCHIQ_SLOT_MASK));
1565 DEBUG_VALUE(PARSE_HEADER, (int)(long)header);
1566 msgid = header->msgid;
1567 DEBUG_VALUE(PARSE_MSGID, msgid);
1568 size = header->size;
1569 type = VCHIQ_MSG_TYPE(msgid);
1570 localport = VCHIQ_MSG_DSTPORT(msgid);
1571 remoteport = VCHIQ_MSG_SRCPORT(msgid);
1573 if (type != VCHIQ_MSG_DATA)
1574 VCHIQ_STATS_INC(state, ctrl_rx_count);
/* These message types are addressed to a specific local service. */
1577 case VCHIQ_MSG_OPENACK:
1578 case VCHIQ_MSG_CLOSE:
1579 case VCHIQ_MSG_DATA:
1580 case VCHIQ_MSG_BULK_RX:
1581 case VCHIQ_MSG_BULK_TX:
1582 case VCHIQ_MSG_BULK_RX_DONE:
1583 case VCHIQ_MSG_BULK_TX_DONE:
1584 service = find_service_by_port(state, localport);
1586 ((service->remoteport != remoteport) &&
1587 (service->remoteport != VCHIQ_PORT_FREE))) &&
1589 (type == VCHIQ_MSG_CLOSE)) {
1590 /* This could be a CLOSE from a client which
1591 hadn't yet received the OPENACK - look for
1592 the connected service */
1594 unlock_service(service);
1595 service = get_connected_service(state,
1598 vchiq_log_warning(vchiq_core_log_level,
1599 "%d: prs %s@%pK (%d->%d) - found connected service %d",
1600 state->id, msg_type_str(type),
1601 header, remoteport, localport,
1602 service->localport);
1606 vchiq_log_error(vchiq_core_log_level,
1607 "%d: prs %s@%pK (%d->%d) - invalid/closed service %d",
1608 state->id, msg_type_str(type),
1609 header, remoteport, localport,
1618 if (SRVTRACE_ENABLED(service, VCHIQ_LOG_INFO)) {
1621 svc_fourcc = service
1622 ? service->base.fourcc
1623 : VCHIQ_MAKE_FOURCC('?', '?', '?', '?');
1624 vchiq_log_info(SRVTRACE_LEVEL(service),
1625 "Rcvd Msg %s(%u) from %c%c%c%c s:%d d:%d "
1627 msg_type_str(type), type,
1628 VCHIQ_FOURCC_AS_4CHARS(svc_fourcc),
1629 remoteport, localport, size);
1631 vchiq_log_dump_mem("Rcvd", 0, header->data,
/* Sanity: a message must not straddle a slot boundary. */
1635 if (((unsigned long)header & VCHIQ_SLOT_MASK) +
1636 calc_stride(size) > VCHIQ_SLOT_SIZE) {
1637 vchiq_log_error(vchiq_core_log_level,
1638 "header %pK (msgid %x) - size %x too big for slot",
1639 header, (unsigned int)msgid,
1640 (unsigned int)size);
1641 WARN(1, "oversized for slot\n");
1645 case VCHIQ_MSG_OPEN:
1646 WARN_ON(!(VCHIQ_MSG_DSTPORT(msgid) == 0));
1647 if (!parse_open(state, header))
1648 goto bail_not_ready;
1650 case VCHIQ_MSG_OPENACK:
1651 if (size >= sizeof(struct vchiq_openack_payload)) {
1652 const struct vchiq_openack_payload *payload =
1653 (struct vchiq_openack_payload *)
1655 service->peer_version = payload->version;
1657 vchiq_log_info(vchiq_core_log_level,
1658 "%d: prs OPENACK@%pK,%x (%d->%d) v:%d",
1659 state->id, header, size, remoteport, localport,
1660 service->peer_version);
1661 if (service->srvstate ==
1662 VCHIQ_SRVSTATE_OPENING) {
1663 service->remoteport = remoteport;
1664 vchiq_set_service_state(service,
1665 VCHIQ_SRVSTATE_OPEN);
/* Wake the opener blocked in vchiq_open_service_internal(). */
1666 complete(&service->remove_event);
1668 vchiq_log_error(vchiq_core_log_level,
1669 "OPENACK received in state %s",
1670 srvstate_names[service->srvstate]);
1672 case VCHIQ_MSG_CLOSE:
1673 WARN_ON(size != 0); /* There should be no data */
1675 vchiq_log_info(vchiq_core_log_level,
1676 "%d: prs CLOSE@%pK (%d->%d)",
1677 state->id, header, remoteport, localport);
1679 mark_service_closing_internal(service, 1);
1681 if (vchiq_close_service_internal(service,
1682 1/*close_recvd*/) == VCHIQ_RETRY)
1683 goto bail_not_ready;
1685 vchiq_log_info(vchiq_core_log_level,
1686 "Close Service %c%c%c%c s:%u d:%d",
1687 VCHIQ_FOURCC_AS_4CHARS(service->base.fourcc),
1689 service->remoteport);
1691 case VCHIQ_MSG_DATA:
1692 vchiq_log_info(vchiq_core_log_level,
1693 "%d: prs DATA@%pK,%x (%d->%d)",
1694 state->id, header, size, remoteport, localport);
1696 if ((service->remoteport == remoteport)
1697 && (service->srvstate ==
1698 VCHIQ_SRVSTATE_OPEN)) {
/* Claim the message/slot so it survives until released. */
1699 header->msgid = msgid | VCHIQ_MSGID_CLAIMED;
1700 claim_slot(state->rx_info);
1701 DEBUG_TRACE(PARSE_LINE);
1702 if (make_service_callback(service,
1703 VCHIQ_MESSAGE_AVAILABLE, header,
1704 NULL) == VCHIQ_RETRY) {
1705 DEBUG_TRACE(PARSE_LINE);
1706 goto bail_not_ready;
1708 VCHIQ_SERVICE_STATS_INC(service, ctrl_rx_count);
1709 VCHIQ_SERVICE_STATS_ADD(service, ctrl_rx_bytes,
1712 VCHIQ_STATS_INC(state, error_count);
1715 case VCHIQ_MSG_CONNECT:
1716 vchiq_log_info(vchiq_core_log_level,
1717 "%d: prs CONNECT@%pK", state->id, header);
1718 state->version_common = ((struct vchiq_slot_zero *)
1719 state->slot_data)->version;
1720 complete(&state->connect);
1722 case VCHIQ_MSG_BULK_RX:
1723 case VCHIQ_MSG_BULK_TX:
1725 * We should never receive a bulk request from the
1726 * other side since we're not setup to perform as the
1731 case VCHIQ_MSG_BULK_RX_DONE:
1732 case VCHIQ_MSG_BULK_TX_DONE:
1733 if ((service->remoteport == remoteport)
1734 && (service->srvstate !=
1735 VCHIQ_SRVSTATE_FREE)) {
1736 struct vchiq_bulk_queue *queue;
1737 struct vchiq_bulk *bulk;
1739 queue = (type == VCHIQ_MSG_BULK_RX_DONE) ?
1740 &service->bulk_rx : &service->bulk_tx;
1742 DEBUG_TRACE(PARSE_LINE);
1743 if (mutex_lock_killable(&service->bulk_mutex)) {
1744 DEBUG_TRACE(PARSE_LINE);
1745 goto bail_not_ready;
/* A DONE without a matching outstanding bulk is a protocol error. */
1747 if ((int)(queue->remote_insert -
1748 queue->local_insert) >= 0) {
1749 vchiq_log_error(vchiq_core_log_level,
1750 "%d: prs %s@%pK (%d->%d) "
1751 "unexpected (ri=%d,li=%d)",
1752 state->id, msg_type_str(type),
1753 header, remoteport, localport,
1754 queue->remote_insert,
1755 queue->local_insert);
1756 mutex_unlock(&service->bulk_mutex);
1759 if (queue->process != queue->remote_insert) {
1760 pr_err("%s: p %x != ri %x\n",
1763 queue->remote_insert);
1764 mutex_unlock(&service->bulk_mutex);
1765 goto bail_not_ready;
1768 bulk = &queue->bulks[
1769 BULK_INDEX(queue->remote_insert)];
/* First word of payload carries the actual transferred length. */
1770 bulk->actual = *(int *)header->data;
1771 queue->remote_insert++;
1773 vchiq_log_info(vchiq_core_log_level,
1774 "%d: prs %s@%pK (%d->%d) %x@%pK",
1775 state->id, msg_type_str(type),
1776 header, remoteport, localport,
1777 bulk->actual, bulk->data);
1779 vchiq_log_trace(vchiq_core_log_level,
1780 "%d: prs:%d %cx li=%x ri=%x p=%x",
1781 state->id, localport,
1782 (type == VCHIQ_MSG_BULK_RX_DONE) ?
1784 queue->local_insert,
1785 queue->remote_insert, queue->process);
1787 DEBUG_TRACE(PARSE_LINE);
1788 WARN_ON(queue->process == queue->local_insert);
1789 vchiq_complete_bulk(bulk);
1791 mutex_unlock(&service->bulk_mutex);
1792 DEBUG_TRACE(PARSE_LINE);
1793 notify_bulks(service, queue, 1/*retry_poll*/);
1794 DEBUG_TRACE(PARSE_LINE);
1797 case VCHIQ_MSG_PADDING:
1798 vchiq_log_trace(vchiq_core_log_level,
1799 "%d: prs PADDING@%pK,%x",
1800 state->id, header, size);
1802 case VCHIQ_MSG_PAUSE:
1803 /* If initiated, signal the application thread */
1804 vchiq_log_trace(vchiq_core_log_level,
1805 "%d: prs PAUSE@%pK,%x",
1806 state->id, header, size);
1807 if (state->conn_state == VCHIQ_CONNSTATE_PAUSED) {
1808 vchiq_log_error(vchiq_core_log_level,
1809 "%d: PAUSE received in state PAUSED",
1813 if (state->conn_state != VCHIQ_CONNSTATE_PAUSE_SENT) {
1814 /* Send a PAUSE in response */
1815 if (queue_message(state, NULL,
1816 VCHIQ_MAKE_MSG(VCHIQ_MSG_PAUSE, 0, 0),
1817 NULL, NULL, 0, QMFLAGS_NO_MUTEX_UNLOCK)
1819 goto bail_not_ready;
1821 /* At this point slot_mutex is held */
1822 vchiq_set_conn_state(state, VCHIQ_CONNSTATE_PAUSED);
1823 vchiq_platform_paused(state);
1825 case VCHIQ_MSG_RESUME:
1826 vchiq_log_trace(vchiq_core_log_level,
1827 "%d: prs RESUME@%pK,%x",
1828 state->id, header, size);
1829 /* Release the slot mutex */
1830 mutex_unlock(&state->slot_mutex);
1831 vchiq_set_conn_state(state, VCHIQ_CONNSTATE_CONNECTED);
1832 vchiq_platform_resumed(state);
1835 case VCHIQ_MSG_REMOTE_USE:
1836 vchiq_on_remote_use(state);
1838 case VCHIQ_MSG_REMOTE_RELEASE:
1839 vchiq_on_remote_release(state);
1841 case VCHIQ_MSG_REMOTE_USE_ACTIVE:
1842 vchiq_on_remote_use_active(state);
1846 vchiq_log_error(vchiq_core_log_level,
1847 "%d: prs invalid msgid %x@%pK,%x",
1848 state->id, msgid, header, size);
1849 WARN(1, "invalid message\n");
1855 unlock_service(service);
/* Advance past this message (stride includes header + padding). */
1859 state->rx_pos += calc_stride(size);
1861 DEBUG_TRACE(PARSE_LINE);
1862 /* Perform some housekeeping when the end of the slot is
1864 if ((state->rx_pos & VCHIQ_SLOT_MASK) == 0) {
1865 /* Remove the extra reference count. */
1866 release_slot(state, state->rx_info, NULL, NULL);
1867 state->rx_data = NULL;
1873 unlock_service(service);
1876 /* Called by the slot handler thread */
/*
 * slot_handler_func - main loop of the slot handler kernel thread.
 *
 * Waits on the trigger remote event, then services rare conditions
 * (suspend check, per-service polls, PAUSE/RESUME connection-state
 * transitions, timeouts) before parsing the receive slots.
 *
 * NOTE(review): the enclosing loop construct and several break/return
 * lines are elided in this excerpt.
 */
1878 slot_handler_func(void *v)
1880 struct vchiq_state *state = (struct vchiq_state *)v;
1881 struct vchiq_shared_state *local = state->local;
1883 DEBUG_INITIALISE(local)
1886 DEBUG_COUNT(SLOT_HANDLER_COUNT);
1887 DEBUG_TRACE(SLOT_HANDLER_LINE);
/* Block until the remote side (or a local poll request) signals us. */
1888 remote_event_wait(&state->trigger_event, &local->trigger);
1892 DEBUG_TRACE(SLOT_HANDLER_LINE);
1893 if (state->poll_needed) {
1894 /* Check if we need to suspend - may change our
1896 vchiq_platform_check_suspend(state);
1898 state->poll_needed = 0;
1900 /* Handle service polling and other rare conditions here
1901 ** out of the mainline code */
1902 switch (state->conn_state) {
1903 case VCHIQ_CONNSTATE_CONNECTED:
1904 /* Poll the services as requested */
1905 poll_services(state);
1908 case VCHIQ_CONNSTATE_PAUSING:
1909 if (queue_message(state, NULL,
1910 VCHIQ_MAKE_MSG(VCHIQ_MSG_PAUSE, 0, 0),
1912 QMFLAGS_NO_MUTEX_UNLOCK)
1914 vchiq_set_conn_state(state,
1915 VCHIQ_CONNSTATE_PAUSE_SENT);
/* Retry the PAUSE send on the next wakeup. */
1918 state->poll_needed = 1;
1922 case VCHIQ_CONNSTATE_PAUSED:
1923 vchiq_platform_resume(state);
1926 case VCHIQ_CONNSTATE_RESUMING:
1927 if (queue_message(state, NULL,
1928 VCHIQ_MAKE_MSG(VCHIQ_MSG_RESUME, 0, 0),
1929 NULL, NULL, 0, QMFLAGS_NO_MUTEX_LOCK)
1931 vchiq_set_conn_state(state,
1932 VCHIQ_CONNSTATE_CONNECTED);
1933 vchiq_platform_resumed(state);
1935 /* This should really be impossible,
1936 ** since the PAUSE should have flushed
1937 ** through outstanding messages. */
1938 vchiq_log_error(vchiq_core_log_level,
1939 "Failed to send RESUME "
1944 case VCHIQ_CONNSTATE_PAUSE_TIMEOUT:
1945 case VCHIQ_CONNSTATE_RESUME_TIMEOUT:
1946 vchiq_platform_handle_timeout(state);
1954 DEBUG_TRACE(SLOT_HANDLER_LINE);
1955 parse_rx_slots(state);
1960 /* Called by the recycle thread */
/*
 * recycle_func - main loop of the slot recycle kernel thread.
 *
 * Allocates a reusable service bitmap, then repeatedly waits on the
 * recycle remote event and processes the freed-slot queue.
 *
 * NOTE(review): the allocation-failure check and loop construct are
 * elided in this excerpt.
 */
1962 recycle_func(void *v)
1964 struct vchiq_state *state = (struct vchiq_state *)v;
1965 struct vchiq_shared_state *local = state->local;
1969 length = sizeof(*found) * BITSET_SIZE(VCHIQ_MAX_SERVICES);
/* kmalloc_array guards against n * size overflow. */
1971 found = kmalloc_array(BITSET_SIZE(VCHIQ_MAX_SERVICES), sizeof(*found),
1977 remote_event_wait(&state->recycle_event, &local->recycle);
1979 process_free_queue(state, found, length);
1984 /* Called by the sync thread */
/*
 * sync_func - main loop of the synchronous-message kernel thread.
 *
 * Waits on sync_trigger, decodes the single message held in the remote
 * sync slot, and dispatches OPENACK/DATA to the owning service. Messages
 * with no valid service, and unexpected types, are logged and released.
 *
 * NOTE(review): the function signature line and loop construct are
 * elided in this excerpt; 'v' is the thread argument (a vchiq_state *).
 */
1988 struct vchiq_state *state = (struct vchiq_state *)v;
1989 struct vchiq_shared_state *local = state->local;
1990 struct vchiq_header *header =
1991 (struct vchiq_header *)SLOT_DATA_FROM_INDEX(state,
1992 state->remote->slot_sync);
1995 struct vchiq_service *service;
1998 unsigned int localport, remoteport;
2000 remote_event_wait(&state->sync_trigger_event, &local->sync_trigger);
2004 msgid = header->msgid;
2005 size = header->size;
2006 type = VCHIQ_MSG_TYPE(msgid);
2007 localport = VCHIQ_MSG_DSTPORT(msgid);
2008 remoteport = VCHIQ_MSG_SRCPORT(msgid);
2010 service = find_service_by_port(state, localport);
/* No matching service: log, release the sync slot, and continue. */
2013 vchiq_log_error(vchiq_sync_log_level,
2014 "%d: sf %s@%pK (%d->%d) - invalid/closed service %d",
2015 state->id, msg_type_str(type),
2016 header, remoteport, localport, localport);
2017 release_message_sync(state, header);
2021 if (vchiq_sync_log_level >= VCHIQ_LOG_TRACE) {
2024 svc_fourcc = service
2025 ? service->base.fourcc
2026 : VCHIQ_MAKE_FOURCC('?', '?', '?', '?');
2027 vchiq_log_trace(vchiq_sync_log_level,
2028 "Rcvd Msg %s from %c%c%c%c s:%d d:%d len:%d",
2030 VCHIQ_FOURCC_AS_4CHARS(svc_fourcc),
2031 remoteport, localport, size);
2033 vchiq_log_dump_mem("Rcvd", 0, header->data,
2038 case VCHIQ_MSG_OPENACK:
2039 if (size >= sizeof(struct vchiq_openack_payload)) {
2040 const struct vchiq_openack_payload *payload =
2041 (struct vchiq_openack_payload *)
2043 service->peer_version = payload->version;
2045 vchiq_log_info(vchiq_sync_log_level,
2046 "%d: sf OPENACK@%pK,%x (%d->%d) v:%d",
2047 state->id, header, size, remoteport, localport,
2048 service->peer_version);
2049 if (service->srvstate == VCHIQ_SRVSTATE_OPENING) {
2050 service->remoteport = remoteport;
2051 vchiq_set_service_state(service,
2052 VCHIQ_SRVSTATE_OPENSYNC);
/* Wake the thread blocked waiting for the ACK. */
2054 complete(&service->remove_event);
2056 release_message_sync(state, header);
2059 case VCHIQ_MSG_DATA:
2060 vchiq_log_trace(vchiq_sync_log_level,
2061 "%d: sf DATA@%pK,%x (%d->%d)",
2062 state->id, header, size, remoteport, localport);
2064 if ((service->remoteport == remoteport) &&
2065 (service->srvstate ==
2066 VCHIQ_SRVSTATE_OPENSYNC)) {
2067 if (make_service_callback(service,
2068 VCHIQ_MESSAGE_AVAILABLE, header,
2069 NULL) == VCHIQ_RETRY)
2070 vchiq_log_error(vchiq_sync_log_level,
2071 "synchronous callback to "
2072 "service %d returns "
2079 vchiq_log_error(vchiq_sync_log_level,
2080 "%d: sf unexpected msgid %x@%pK,%x",
2081 state->id, msgid, header, size);
2082 release_message_sync(state, header);
2086 unlock_service(service);
/*
 * init_bulk_queue - reset a bulk queue's indices to the empty state.
 * NOTE(review): one index assignment (apparently 'process') is elided
 * in this excerpt.
 */
2093 init_bulk_queue(struct vchiq_bulk_queue *queue)
2095 queue->local_insert = 0;
2096 queue->remote_insert = 0;
2098 queue->remote_notify = 0;
/*
 * get_conn_state_name - map a connection state to its printable name.
 * NOTE(review): no bounds check on conn_state is visible here; callers
 * must pass a valid VCHIQ_CONNSTATE_T value.
 */
2103 get_conn_state_name(VCHIQ_CONNSTATE_T conn_state)
2105 return conn_state_names[conn_state];
/*
 * vchiq_init_slots - carve a shared memory region into VCHIQ slots.
 *
 * Aligns mem_base up to VCHIQ_SLOT_SIZE, places slot_zero at the aligned
 * start, and splits the remaining data slots evenly between master and
 * slave (each side gets a sync slot plus a run of ordinary slots).
 * Returns the initialised slot_zero; the failure return for the
 * too-small case is elided in this excerpt.
 */
2108 struct vchiq_slot_zero *
2109 vchiq_init_slots(void *mem_base, int mem_size)
2112 (int)((VCHIQ_SLOT_SIZE - (long)mem_base) & VCHIQ_SLOT_MASK);
2113 struct vchiq_slot_zero *slot_zero =
2114 (struct vchiq_slot_zero *)((char *)mem_base + mem_align);
2115 int num_slots = (mem_size - mem_align)/VCHIQ_SLOT_SIZE;
2116 int first_data_slot = VCHIQ_SLOT_ZERO_SLOTS;
2118 /* Ensure there is enough memory to run an absolutely minimum system */
2119 num_slots -= first_data_slot;
2121 if (num_slots < 4) {
2122 vchiq_log_error(vchiq_core_log_level,
2123 "%s - insufficient memory %x bytes",
2124 __func__, mem_size);
2128 memset(slot_zero, 0, sizeof(struct vchiq_slot_zero));
2130 slot_zero->magic = VCHIQ_MAGIC;
2131 slot_zero->version = VCHIQ_VERSION;
2132 slot_zero->version_min = VCHIQ_VERSION_MIN;
2133 slot_zero->slot_zero_size = sizeof(struct vchiq_slot_zero);
2134 slot_zero->slot_size = VCHIQ_SLOT_SIZE;
2135 slot_zero->max_slots = VCHIQ_MAX_SLOTS;
2136 slot_zero->max_slots_per_side = VCHIQ_MAX_SLOTS_PER_SIDE;
/* First half of the data slots to the master, second half to the slave. */
2138 slot_zero->master.slot_sync = first_data_slot;
2139 slot_zero->master.slot_first = first_data_slot + 1;
2140 slot_zero->master.slot_last = first_data_slot + (num_slots/2) - 1;
2141 slot_zero->slave.slot_sync = first_data_slot + (num_slots/2);
2142 slot_zero->slave.slot_first = first_data_slot + (num_slots/2) + 1;
2143 slot_zero->slave.slot_last = first_data_slot + num_slots - 1;
/*
 * vchiq_init_state - initialise a vchiq_state over a prepared slot_zero.
 *
 * This side acts as the slave (local = slave half, remote = master half).
 * Sets up mutexes, completions and remote events, seeds the local slot
 * queue, then creates and starts the three worker threads (slot handler,
 * recycle, sync). Marks local->initialised last so the remote side only
 * sees a fully constructed state.
 *
 * NOTE(review): several lines (return statements, some error paths,
 * thread-creation arguments) are elided in this excerpt.
 */
2149 vchiq_init_state(struct vchiq_state *state, struct vchiq_slot_zero *slot_zero)
2151 struct vchiq_shared_state *local;
2152 struct vchiq_shared_state *remote;
2153 VCHIQ_STATUS_T status;
2154 char threadname[16];
2157 vchiq_log_warning(vchiq_core_log_level,
2158 "%s: slot_zero = %pK", __func__, slot_zero);
/* Only a single global state instance is supported. */
2160 if (vchiq_states[0]) {
2161 pr_err("%s: VCHIQ state already initialized\n", __func__);
2165 local = &slot_zero->slave;
2166 remote = &slot_zero->master;
2168 if (local->initialised) {
2169 vchiq_loud_error_header();
2170 if (remote->initialised)
2171 vchiq_loud_error("local state has already been "
2174 vchiq_loud_error("master/slave mismatch two slaves");
2175 vchiq_loud_error_footer();
2179 memset(state, 0, sizeof(struct vchiq_state));
2182 initialize shared state pointers
2185 state->local = local;
2186 state->remote = remote;
2187 state->slot_data = (struct vchiq_slot *)slot_zero;
2190 initialize events and mutexes
2193 init_completion(&state->connect);
2194 mutex_init(&state->mutex);
2195 mutex_init(&state->slot_mutex);
2196 mutex_init(&state->recycle_mutex);
2197 mutex_init(&state->sync_mutex);
2198 mutex_init(&state->bulk_transfer_mutex);
2200 init_completion(&state->slot_available_event);
2201 init_completion(&state->slot_remove_event);
2202 init_completion(&state->data_quota_event);
2204 state->slot_queue_available = 0;
2206 for (i = 0; i < VCHIQ_MAX_SERVICES; i++) {
2207 struct vchiq_service_quota *service_quota =
2208 &state->service_quotas[i];
2209 init_completion(&service_quota->quota_event);
/* Seed the local slot queue with this side's data slots. */
2212 for (i = local->slot_first; i <= local->slot_last; i++) {
2213 local->slot_queue[state->slot_queue_available++] = i;
2214 complete(&state->slot_available_event);
2217 state->default_slot_quota = state->slot_queue_available/2;
2218 state->default_message_quota =
2219 min((unsigned short)(state->default_slot_quota * 256),
2220 (unsigned short)~0);
2222 state->previous_data_index = -1;
2223 state->data_use_count = 0;
2224 state->data_quota = state->slot_queue_available - 1;
2226 remote_event_create(&state->trigger_event, &local->trigger);
2228 remote_event_create(&state->recycle_event, &local->recycle);
2229 local->slot_queue_recycle = state->slot_queue_available;
2230 remote_event_create(&state->sync_trigger_event, &local->sync_trigger);
2231 remote_event_create(&state->sync_release_event, &local->sync_release);
2233 /* At start-of-day, the slot is empty and available */
2234 ((struct vchiq_header *)
2235 SLOT_DATA_FROM_INDEX(state, local->slot_sync))->msgid =
2236 VCHIQ_MSGID_PADDING;
2237 remote_event_signal_local(&state->sync_release_event, &local->sync_release);
2239 local->debug[DEBUG_ENTRIES] = DEBUG_MAX;
2241 status = vchiq_platform_init_state(state);
2244 bring up slot handler thread
2246 snprintf(threadname, sizeof(threadname), "vchiq-slot/%d", state->id);
2247 state->slot_handler_thread = kthread_create(&slot_handler_func,
2251 if (IS_ERR(state->slot_handler_thread)) {
2252 vchiq_loud_error_header();
2253 vchiq_loud_error("couldn't create thread %s", threadname);
2254 vchiq_loud_error_footer();
2257 set_user_nice(state->slot_handler_thread, -19);
2259 snprintf(threadname, sizeof(threadname), "vchiq-recy/%d", state->id);
2260 state->recycle_thread = kthread_create(&recycle_func,
2263 if (IS_ERR(state->recycle_thread)) {
2264 vchiq_loud_error_header();
2265 vchiq_loud_error("couldn't create thread %s", threadname);
2266 vchiq_loud_error_footer();
2267 goto fail_free_handler_thread;
2269 set_user_nice(state->recycle_thread, -19);
2271 snprintf(threadname, sizeof(threadname), "vchiq-sync/%d", state->id);
2272 state->sync_thread = kthread_create(&sync_func,
2275 if (IS_ERR(state->sync_thread)) {
2276 vchiq_loud_error_header();
2277 vchiq_loud_error("couldn't create thread %s", threadname);
2278 vchiq_loud_error_footer();
2279 goto fail_free_recycle_thread;
2281 set_user_nice(state->sync_thread, -20);
2283 wake_up_process(state->slot_handler_thread);
2284 wake_up_process(state->recycle_thread);
2285 wake_up_process(state->sync_thread);
2287 vchiq_states[0] = state;
2289 /* Indicate readiness to the other side */
2290 local->initialised = 1;
/* goto-based cleanup: unwind threads in reverse creation order. */
2294 fail_free_recycle_thread:
2295 kthread_stop(state->recycle_thread);
2296 fail_free_handler_thread:
2297 kthread_stop(state->slot_handler_thread);
2302 /* Called from application thread when a client or server service is created. */
/*
 * vchiq_add_service_internal - allocate and register a new service.
 *
 * Initialises a vchiq_service (ref_count starts at 1, which the caller
 * keeps), then claims a slot in state->services[] under state->mutex:
 * clients (OPENING) take the first free slot, servers scan from the top
 * and reject a duplicate public fourcc registered by a different
 * instance/callback. A handle is derived from a global sequence plus the
 * state and port indices. Quotas are initialised and the service is
 * brought online in the requested srvstate.
 *
 * NOTE(review): allocation-failure return, some loop bodies and the
 * final return are elided in this excerpt.
 */
2303 struct vchiq_service *
2304 vchiq_add_service_internal(struct vchiq_state *state,
2305 const struct vchiq_service_params *params,
2306 int srvstate, VCHIQ_INSTANCE_T instance,
2307 VCHIQ_USERDATA_TERM_T userdata_term)
2309 struct vchiq_service *service;
2310 struct vchiq_service **pservice = NULL;
2311 struct vchiq_service_quota *service_quota;
2314 service = kmalloc(sizeof(*service), GFP_KERNEL);
2318 service->base.fourcc = params->fourcc;
2319 service->base.callback = params->callback;
2320 service->base.userdata = params->userdata;
2321 service->handle = VCHIQ_SERVICE_HANDLE_INVALID;
2322 service->ref_count = 1;
2323 service->srvstate = VCHIQ_SRVSTATE_FREE;
2324 service->userdata_term = userdata_term;
2325 service->localport = VCHIQ_PORT_FREE;
2326 service->remoteport = VCHIQ_PORT_FREE;
/* Clients hide their fourcc so they are not matched by remote OPENs. */
2328 service->public_fourcc = (srvstate == VCHIQ_SRVSTATE_OPENING) ?
2329 VCHIQ_FOURCC_INVALID : params->fourcc;
2330 service->client_id = 0;
2331 service->auto_close = 1;
2333 service->closing = 0;
2335 atomic_set(&service->poll_flags, 0);
2336 service->version = params->version;
2337 service->version_min = params->version_min;
2338 service->state = state;
2339 service->instance = instance;
2340 service->service_use_count = 0;
2341 init_bulk_queue(&service->bulk_tx);
2342 init_bulk_queue(&service->bulk_rx);
2343 init_completion(&service->remove_event);
2344 init_completion(&service->bulk_remove_event);
2345 mutex_init(&service->bulk_mutex);
2346 memset(&service->stats, 0, sizeof(service->stats));
2348 /* Although it is perfectly possible to use service_spinlock
2349 ** to protect the creation of services, it is overkill as it
2350 ** disables interrupts while the array is searched.
2351 ** The only danger is of another thread trying to create a
2352 ** service - service deletion is safe.
2353 ** Therefore it is preferable to use state->mutex which,
2354 ** although slower to claim, doesn't block interrupts while
2358 mutex_lock(&state->mutex);
2360 /* Prepare to use a previously unused service */
2361 if (state->unused_service < VCHIQ_MAX_SERVICES)
2362 pservice = &state->services[state->unused_service];
2364 if (srvstate == VCHIQ_SRVSTATE_OPENING) {
2365 for (i = 0; i < state->unused_service; i++) {
2366 struct vchiq_service *srv = state->services[i];
2369 pservice = &state->services[i];
/* Servers scan downwards and reject a conflicting registration. */
2374 for (i = (state->unused_service - 1); i >= 0; i--) {
2375 struct vchiq_service *srv = state->services[i];
2378 pservice = &state->services[i];
2379 else if ((srv->public_fourcc == params->fourcc)
2380 && ((srv->instance != instance) ||
2381 (srv->base.callback !=
2382 params->callback))) {
2383 /* There is another server using this
2384 ** fourcc which doesn't match. */
2392 service->localport = (pservice - state->services);
/* Encode a global sequence, state id and port into the handle. */
2394 handle_seq = VCHIQ_MAX_STATES *
2396 service->handle = handle_seq |
2397 (state->id * VCHIQ_MAX_SERVICES) |
2399 handle_seq += VCHIQ_MAX_STATES * VCHIQ_MAX_SERVICES;
2400 *pservice = service;
2401 if (pservice == &state->services[state->unused_service])
2402 state->unused_service++;
2405 mutex_unlock(&state->mutex);
2412 service_quota = &state->service_quotas[service->localport];
2413 service_quota->slot_quota = state->default_slot_quota;
2414 service_quota->message_quota = state->default_message_quota;
2415 if (service_quota->slot_use_count == 0)
2416 service_quota->previous_tx_index =
2417 SLOT_QUEUE_INDEX_FROM_POS(state->local_tx_pos)
2420 /* Bring this service online */
2421 vchiq_set_service_state(service, srvstate);
2423 vchiq_log_info(vchiq_core_msg_log_level,
2424 "%s Service %c%c%c%c SrcPort:%d",
2425 (srvstate == VCHIQ_SRVSTATE_OPENING)
2427 VCHIQ_FOURCC_AS_4CHARS(params->fourcc),
2428 service->localport);
2430 /* Don't unlock the service - leave it with a ref_count of 1. */
/*
 * vchiq_open_service_internal - send an OPEN and wait for the reply.
 *
 * Queues a blocking OPEN message carrying the service's fourcc and
 * version window, then sleeps on remove_event until the OPENACK/CLOSE
 * handler wakes it. Returns VCHIQ_RETRY if the wait is interrupted,
 * VCHIQ_ERROR if the service did not reach OPEN/OPENSYNC; the use-count
 * taken here is released on either failure path.
 *
 * NOTE(review): some payload fields and the final return are elided in
 * this excerpt.
 */
2436 vchiq_open_service_internal(struct vchiq_service *service, int client_id)
2438 struct vchiq_open_payload payload = {
2439 service->base.fourcc,
2442 service->version_min
2444 VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
2446 service->client_id = client_id;
2447 vchiq_use_service_internal(service);
2448 status = queue_message(service->state,
2450 VCHIQ_MAKE_MSG(VCHIQ_MSG_OPEN,
2453 memcpy_copy_callback,
2456 QMFLAGS_IS_BLOCKING);
2457 if (status == VCHIQ_SUCCESS) {
2458 /* Wait for the ACK/NAK */
2459 if (wait_for_completion_killable(&service->remove_event)) {
2460 status = VCHIQ_RETRY;
2461 vchiq_release_service_internal(service);
2462 } else if ((service->srvstate != VCHIQ_SRVSTATE_OPEN) &&
2463 (service->srvstate != VCHIQ_SRVSTATE_OPENSYNC)) {
/* CLOSEWAIT is an expected refusal; anything else is logged. */
2464 if (service->srvstate != VCHIQ_SRVSTATE_CLOSEWAIT)
2465 vchiq_log_error(vchiq_core_log_level,
2466 "%d: osi - srvstate = %s (ref %d)",
2468 srvstate_names[service->srvstate],
2469 service->ref_count);
2470 status = VCHIQ_ERROR;
2471 VCHIQ_SERVICE_STATS_INC(service, error_count);
2472 vchiq_release_service_internal(service);
/*
 * release_service_messages - release messages still claimed by a service.
 *
 * Scans the remote sync slot and every remote data slot with outstanding
 * references, releasing any header whose destination port matches this
 * service and which is marked VCHIQ_MSGID_CLAIMED. For the slot currently
 * being parsed, the scan stops at the current read position.
 *
 * NOTE(review): some declarations and the inner loop head are elided in
 * this excerpt.
 */
2479 release_service_messages(struct vchiq_service *service)
2481 struct vchiq_state *state = service->state;
2482 int slot_last = state->remote->slot_last;
2485 /* Release any claimed messages aimed at this service */
2487 if (service->sync) {
2488 struct vchiq_header *header =
2489 (struct vchiq_header *)SLOT_DATA_FROM_INDEX(state,
2490 state->remote->slot_sync);
2491 if (VCHIQ_MSG_DSTPORT(header->msgid) == service->localport)
2492 release_message_sync(state, header);
2497 for (i = state->remote->slot_first; i <= slot_last; i++) {
2498 struct vchiq_slot_info *slot_info =
2499 SLOT_INFO_FROM_INDEX(state, i);
/* Only slots with outstanding claims need scanning. */
2500 if (slot_info->release_count != slot_info->use_count) {
2502 (char *)SLOT_DATA_FROM_INDEX(state, i);
2503 unsigned int pos, end;
2505 end = VCHIQ_SLOT_SIZE;
2506 if (data == state->rx_data)
2507 /* This buffer is still being read from - stop
2508 ** at the current read position */
2509 end = state->rx_pos & VCHIQ_SLOT_MASK;
2514 struct vchiq_header *header =
2515 (struct vchiq_header *)(data + pos);
2516 int msgid = header->msgid;
2517 int port = VCHIQ_MSG_DSTPORT(msgid);
2519 if ((port == service->localport) &&
2520 (msgid & VCHIQ_MSGID_CLAIMED)) {
2521 vchiq_log_info(vchiq_core_log_level,
2522 " fsi - hdr %pK", header);
2523 release_slot(state, slot_info, header,
2526 pos += calc_stride(header->size);
/* A stride past the slot end means the slot contents are corrupt. */
2527 if (pos > VCHIQ_SLOT_SIZE) {
2528 vchiq_log_error(vchiq_core_log_level,
2529 "fsi - pos %x: header %pK, msgid %x, header->msgid %x, header->size %x",
2531 header->msgid, header->size);
2532 WARN(1, "invalid slot position\n");
/*
 * do_abort_bulks - abort and notify all outstanding bulks on a service.
 *
 * Takes bulk_mutex (killably; the failure return is elided in this
 * excerpt), aborts both queues, then runs the completion notifications
 * outside the lock. Returns nonzero on overall VCHIQ_SUCCESS.
 */
2540 do_abort_bulks(struct vchiq_service *service)
2542 VCHIQ_STATUS_T status;
2544 /* Abort any outstanding bulk transfers */
2545 if (mutex_lock_killable(&service->bulk_mutex) != 0)
2547 abort_outstanding_bulks(service, &service->bulk_tx);
2548 abort_outstanding_bulks(service, &service->bulk_rx);
2549 mutex_unlock(&service->bulk_mutex);
2551 status = notify_bulks(service, &service->bulk_tx, 0/*!retry_poll*/);
2552 if (status == VCHIQ_SUCCESS)
2553 status = notify_bulks(service, &service->bulk_rx,
2555 return (status == VCHIQ_SUCCESS);
/*
 * close_service_complete - finish closing a service and notify its owner.
 *
 * Chooses the post-close state (servers with auto_close return to
 * LISTENING, otherwise CLOSEWAIT; clients go to CLOSED), delivers the
 * VCHIQ_SERVICE_CLOSED callback, releases any remaining use counts and
 * frees or re-arms the service. If the callback returns VCHIQ_RETRY the
 * service is parked in 'failstate' so the close can be retried.
 *
 * NOTE(review): some branch bodies and the final return are elided in
 * this excerpt.
 */
2558 static VCHIQ_STATUS_T
2559 close_service_complete(struct vchiq_service *service, int failstate)
2561 VCHIQ_STATUS_T status;
2562 int is_server = (service->public_fourcc != VCHIQ_FOURCC_INVALID);
2565 switch (service->srvstate) {
2566 case VCHIQ_SRVSTATE_OPEN:
2567 case VCHIQ_SRVSTATE_CLOSESENT:
2568 case VCHIQ_SRVSTATE_CLOSERECVD:
2570 if (service->auto_close) {
2571 service->client_id = 0;
2572 service->remoteport = VCHIQ_PORT_FREE;
2573 newstate = VCHIQ_SRVSTATE_LISTENING;
2575 newstate = VCHIQ_SRVSTATE_CLOSEWAIT;
2577 newstate = VCHIQ_SRVSTATE_CLOSED;
2578 vchiq_set_service_state(service, newstate);
2580 case VCHIQ_SRVSTATE_LISTENING:
2583 vchiq_log_error(vchiq_core_log_level,
2584 "%s(%x) called in state %s", __func__,
2585 service->handle, srvstate_names[service->srvstate]);
2586 WARN(1, "%s in unexpected state\n", __func__);
2590 status = make_service_callback(service,
2591 VCHIQ_SERVICE_CLOSED, NULL, NULL);
2593 if (status != VCHIQ_RETRY) {
2594 int uc = service->service_use_count;
2596 /* Complete the close process */
2597 for (i = 0; i < uc; i++)
2598 /* cater for cases where close is forced and the
2599 ** client may not close all it's handles */
2600 vchiq_release_service_internal(service);
2602 service->client_id = 0;
2603 service->remoteport = VCHIQ_PORT_FREE;
2605 if (service->srvstate == VCHIQ_SRVSTATE_CLOSED)
2606 vchiq_free_service_internal(service);
2607 else if (service->srvstate != VCHIQ_SRVSTATE_CLOSEWAIT) {
2609 service->closing = 0;
2611 complete(&service->remove_event);
/* Callback not ready: park in failstate for a later retry. */
2614 vchiq_set_service_state(service, failstate);
2619 /* Called by the slot handler */
/* Drive the per-service close state machine.  'close_recvd' is non-zero
 * when the peer initiated the close (a CLOSE message was received),
 * zero when this side initiated it.  Returns VCHIQ_RETRY when the close
 * must be re-attempted (e.g. a killable wait was interrupted). */
2621 vchiq_close_service_internal(struct vchiq_service *service, int close_recvd)
2623 struct vchiq_state *state = service->state;
2624 VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
2625 int is_server = (service->public_fourcc != VCHIQ_FOURCC_INVALID);
2627 vchiq_log_info(vchiq_core_log_level, "%d: csi:%d,%d (%s)",
2628 service->state->id, service->localport, close_recvd,
2629 srvstate_names[service->srvstate]);
2631 switch (service->srvstate) {
/* Already closed / never opened: a received CLOSE here is an error. */
2632 case VCHIQ_SRVSTATE_CLOSED:
2633 case VCHIQ_SRVSTATE_HIDDEN:
2634 case VCHIQ_SRVSTATE_LISTENING:
2635 case VCHIQ_SRVSTATE_CLOSEWAIT:
2637 vchiq_log_error(vchiq_core_log_level,
2640 __func__, srvstate_names[service->srvstate]);
2641 else if (is_server) {
2642 if (service->srvstate == VCHIQ_SRVSTATE_LISTENING) {
2643 status = VCHIQ_ERROR;
2645 service->client_id = 0;
2646 service->remoteport = VCHIQ_PORT_FREE;
/* A waiting server goes back to LISTENING for the next client. */
2647 if (service->srvstate ==
2648 VCHIQ_SRVSTATE_CLOSEWAIT)
2649 vchiq_set_service_state(service,
2650 VCHIQ_SRVSTATE_LISTENING);
2652 complete(&service->remove_event);
2654 vchiq_free_service_internal(service);
2656 case VCHIQ_SRVSTATE_OPENING:
2658 /* The open was rejected - tell the user */
2659 vchiq_set_service_state(service,
2660 VCHIQ_SRVSTATE_CLOSEWAIT);
2661 complete(&service->remove_event);
2663 /* Shutdown mid-open - let the other side know */
2664 status = queue_message(state, service,
2668 VCHIQ_MSG_DSTPORT(service->remoteport)),
/* Synchronous services serialise on sync_mutex before closing. */
2673 case VCHIQ_SRVSTATE_OPENSYNC:
2674 mutex_lock(&state->sync_mutex);
/* fallthrough */
2676 case VCHIQ_SRVSTATE_OPEN:
2678 if (!do_abort_bulks(service))
2679 status = VCHIQ_RETRY;
2682 release_service_messages(service);
2684 if (status == VCHIQ_SUCCESS)
2685 status = queue_message(state, service,
2689 VCHIQ_MSG_DSTPORT(service->remoteport)),
2690 NULL, NULL, 0, QMFLAGS_NO_MUTEX_UNLOCK);
2692 if (status == VCHIQ_SUCCESS) {
2694 /* Change the state while the mutex is
2696 vchiq_set_service_state(service,
2697 VCHIQ_SRVSTATE_CLOSESENT);
2698 mutex_unlock(&state->slot_mutex);
2700 mutex_unlock(&state->sync_mutex);
2703 } else if (service->srvstate == VCHIQ_SRVSTATE_OPENSYNC) {
2704 mutex_unlock(&state->sync_mutex);
2709 /* Change the state while the mutex is still held */
2710 vchiq_set_service_state(service, VCHIQ_SRVSTATE_CLOSERECVD);
2711 mutex_unlock(&state->slot_mutex);
2713 mutex_unlock(&state->sync_mutex);
2715 status = close_service_complete(service,
2716 VCHIQ_SRVSTATE_CLOSERECVD);
2719 case VCHIQ_SRVSTATE_CLOSESENT:
2721 /* This happens when a process is killed mid-close */
2724 if (!do_abort_bulks(service)) {
2725 status = VCHIQ_RETRY;
2729 if (status == VCHIQ_SUCCESS)
2730 status = close_service_complete(service,
2731 VCHIQ_SRVSTATE_CLOSERECVD);
2734 case VCHIQ_SRVSTATE_CLOSERECVD:
2735 if (!close_recvd && is_server)
2736 /* Force into LISTENING mode */
2737 vchiq_set_service_state(service,
2738 VCHIQ_SRVSTATE_LISTENING);
2739 status = close_service_complete(service,
2740 VCHIQ_SRVSTATE_CLOSERECVD);
/* Any other state is unexpected — log it. */
2744 vchiq_log_error(vchiq_core_log_level,
2745 "%s(%d) called in state %s", __func__,
2746 close_recvd, srvstate_names[service->srvstate]);
2753 /* Called from the application process upon process death */
2755 vchiq_terminate_service_internal(struct vchiq_service *service)
2757 struct vchiq_state *state = service->state;
2759 vchiq_log_info(vchiq_core_log_level, "%d: tsi - (%d<->%d)",
2760 state->id, service->localport, service->remoteport);
2762 mark_service_closing(service);
2764 /* Mark the service for removal by the slot handler */
2765 request_poll(state, service, VCHIQ_POLL_REMOVE);
2768 /* Called from the slot handler */
2770 vchiq_free_service_internal(struct vchiq_service *service)
2772 struct vchiq_state *state = service->state;
2774 vchiq_log_info(vchiq_core_log_level, "%d: fsi - (%d)",
2775 state->id, service->localport);
2777 switch (service->srvstate) {
2778 case VCHIQ_SRVSTATE_OPENING:
2779 case VCHIQ_SRVSTATE_CLOSED:
2780 case VCHIQ_SRVSTATE_HIDDEN:
2781 case VCHIQ_SRVSTATE_LISTENING:
2782 case VCHIQ_SRVSTATE_CLOSEWAIT:
2785 vchiq_log_error(vchiq_core_log_level,
2786 "%d: fsi - (%d) in state %s",
2787 state->id, service->localport,
2788 srvstate_names[service->srvstate]);
2792 vchiq_set_service_state(service, VCHIQ_SRVSTATE_FREE);
2794 complete(&service->remove_event);
2796 /* Release the initial lock */
2797 unlock_service(service);
2801 vchiq_connect_internal(struct vchiq_state *state, VCHIQ_INSTANCE_T instance)
2803 struct vchiq_service *service;
2806 /* Find all services registered to this client and enable them. */
2808 while ((service = next_service_by_instance(state, instance,
2810 if (service->srvstate == VCHIQ_SRVSTATE_HIDDEN)
2811 vchiq_set_service_state(service,
2812 VCHIQ_SRVSTATE_LISTENING);
2813 unlock_service(service);
2816 if (state->conn_state == VCHIQ_CONNSTATE_DISCONNECTED) {
2817 if (queue_message(state, NULL,
2818 VCHIQ_MAKE_MSG(VCHIQ_MSG_CONNECT, 0, 0), NULL, NULL,
2819 0, QMFLAGS_IS_BLOCKING) == VCHIQ_RETRY)
2822 vchiq_set_conn_state(state, VCHIQ_CONNSTATE_CONNECTING);
2825 if (state->conn_state == VCHIQ_CONNSTATE_CONNECTING) {
2826 if (wait_for_completion_killable(&state->connect))
2829 vchiq_set_conn_state(state, VCHIQ_CONNSTATE_CONNECTED);
2830 complete(&state->connect);
2833 return VCHIQ_SUCCESS;
2837 vchiq_shutdown_internal(struct vchiq_state *state, VCHIQ_INSTANCE_T instance)
2839 struct vchiq_service *service;
2842 /* Find all services registered to this client and enable them. */
2844 while ((service = next_service_by_instance(state, instance,
2846 (void)vchiq_remove_service(service->handle);
2847 unlock_service(service);
2850 return VCHIQ_SUCCESS;
2854 vchiq_pause_internal(struct vchiq_state *state)
2856 VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
2858 switch (state->conn_state) {
2859 case VCHIQ_CONNSTATE_CONNECTED:
2860 /* Request a pause */
2861 vchiq_set_conn_state(state, VCHIQ_CONNSTATE_PAUSING);
2862 request_poll(state, NULL, 0);
2865 vchiq_log_error(vchiq_core_log_level,
2867 __func__, conn_state_names[state->conn_state]);
2868 status = VCHIQ_ERROR;
2869 VCHIQ_STATS_INC(state, error_count);
2877 vchiq_resume_internal(struct vchiq_state *state)
2879 VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
2881 if (state->conn_state == VCHIQ_CONNSTATE_PAUSED) {
2882 vchiq_set_conn_state(state, VCHIQ_CONNSTATE_RESUMING);
2883 request_poll(state, NULL, 0);
2885 status = VCHIQ_ERROR;
2886 VCHIQ_STATS_INC(state, error_count);
/* Public entry point: close an open service and wait until the close
 * has taken effect.  May be called from the slot handler itself (close
 * runs inline) or from any other thread (close is delegated via a
 * TERMINATE poll and this thread blocks on remove_event).  Returns
 * VCHIQ_RETRY if interrupted by a signal. */
2893 vchiq_close_service(VCHIQ_SERVICE_HANDLE_T handle)
2895 /* Unregister the service */
2896 struct vchiq_service *service = find_service_by_handle(handle);
2897 VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
2902 vchiq_log_info(vchiq_core_log_level,
2903 "%d: close_service:%d",
2904 service->state->id, service->localport);
/* Nothing to close in these states. */
2906 if ((service->srvstate == VCHIQ_SRVSTATE_FREE) ||
2907 (service->srvstate == VCHIQ_SRVSTATE_LISTENING) ||
2908 (service->srvstate == VCHIQ_SRVSTATE_HIDDEN)) {
2909 unlock_service(service);
2913 mark_service_closing(service);
2915 if (current == service->state->slot_handler_thread) {
/* Already on the slot handler - perform the close directly. */
2916 status = vchiq_close_service_internal(service,
2918 WARN_ON(status == VCHIQ_RETRY);
2920 /* Mark the service for termination by the slot handler */
2921 request_poll(service->state, service, VCHIQ_POLL_TERMINATE);
/* Wait (interruptibly) for the close to reach a stable state. */
2925 if (wait_for_completion_killable(&service->remove_event)) {
2926 status = VCHIQ_RETRY;
2930 if ((service->srvstate == VCHIQ_SRVSTATE_FREE) ||
2931 (service->srvstate == VCHIQ_SRVSTATE_LISTENING) ||
2932 (service->srvstate == VCHIQ_SRVSTATE_OPEN))
2935 vchiq_log_warning(vchiq_core_log_level,
2936 "%d: close_service:%d - waiting in state %s",
2937 service->state->id, service->localport,
2938 srvstate_names[service->srvstate]);
/* A successful wait that didn't land in FREE/LISTENING is a failure. */
2941 if ((status == VCHIQ_SUCCESS) &&
2942 (service->srvstate != VCHIQ_SRVSTATE_FREE) &&
2943 (service->srvstate != VCHIQ_SRVSTATE_LISTENING))
2944 status = VCHIQ_ERROR;
2946 unlock_service(service);
/* Public entry point: remove a service entirely (unlike close, a
 * server is not left LISTENING).  Same threading rules as
 * vchiq_close_service(): inline when on the slot handler, otherwise a
 * REMOVE poll is queued and this thread waits on remove_event. */
2952 vchiq_remove_service(VCHIQ_SERVICE_HANDLE_T handle)
2954 /* Unregister the service */
2955 struct vchiq_service *service = find_service_by_handle(handle);
2956 VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
2961 vchiq_log_info(vchiq_core_log_level,
2962 "%d: remove_service:%d",
2963 service->state->id, service->localport);
2965 if (service->srvstate == VCHIQ_SRVSTATE_FREE) {
2966 unlock_service(service);
2970 mark_service_closing(service);
2972 if ((service->srvstate == VCHIQ_SRVSTATE_HIDDEN) ||
2973 (current == service->state->slot_handler_thread)) {
2974 /* Make it look like a client, because it must be removed and
2975 not left in the LISTENING state. */
2976 service->public_fourcc = VCHIQ_FOURCC_INVALID;
2978 status = vchiq_close_service_internal(service,
2980 WARN_ON(status == VCHIQ_RETRY);
2982 /* Mark the service for removal by the slot handler */
2983 request_poll(service->state, service, VCHIQ_POLL_REMOVE);
/* Wait (interruptibly) until the service reaches FREE or OPEN. */
2986 if (wait_for_completion_killable(&service->remove_event)) {
2987 status = VCHIQ_RETRY;
2991 if ((service->srvstate == VCHIQ_SRVSTATE_FREE) ||
2992 (service->srvstate == VCHIQ_SRVSTATE_OPEN))
2995 vchiq_log_warning(vchiq_core_log_level,
2996 "%d: remove_service:%d - waiting in state %s",
2997 service->state->id, service->localport,
2998 srvstate_names[service->srvstate]);
/* Removal only counts as success if the service ended up FREE. */
3001 if ((status == VCHIQ_SUCCESS) &&
3002 (service->srvstate != VCHIQ_SRVSTATE_FREE))
3003 status = VCHIQ_ERROR;
3005 unlock_service(service);
3010 /* This function may be called by kernel threads or user threads.
3011 * User threads may receive VCHIQ_RETRY to indicate that a signal has been
3012 * received and the call should be retried after being returned to user
3014 * When called in blocking mode, the userdata field points to a bulk_waiter
/* Queue (or, in WAITING mode, re-wait on) a bulk transfer on 'handle'.
 * 'offset'/'size' describe the local buffer, 'dir' selects TX or RX,
 * 'mode' selects callback / blocking / waiting semantics.  Error paths
 * unwind via the goto labels at the bottom: both mutexes, then the
 * prepared bulk, then the service reference. */
3017 VCHIQ_STATUS_T vchiq_bulk_transfer(VCHIQ_SERVICE_HANDLE_T handle,
3018 void *offset, int size, void *userdata,
3019 VCHIQ_BULK_MODE_T mode,
3020 VCHIQ_BULK_DIR_T dir)
3022 struct vchiq_service *service = find_service_by_handle(handle);
3023 struct vchiq_bulk_queue *queue;
3024 struct vchiq_bulk *bulk;
3025 struct vchiq_state *state;
3026 struct bulk_waiter *bulk_waiter = NULL;
3027 const char dir_char = (dir == VCHIQ_BULK_TRANSMIT) ? 't' : 'r';
3028 const int dir_msgtype = (dir == VCHIQ_BULK_TRANSMIT) ?
3029 VCHIQ_MSG_BULK_TX : VCHIQ_MSG_BULK_RX;
3030 VCHIQ_STATUS_T status = VCHIQ_ERROR;
/* Reject closed/invalid services and NULL buffers up front. */
3033 if (!service || service->srvstate != VCHIQ_SRVSTATE_OPEN ||
3034 !offset || vchiq_check_service(service) != VCHIQ_SUCCESS)
3038 case VCHIQ_BULK_MODE_NOCALLBACK:
3039 case VCHIQ_BULK_MODE_CALLBACK:
3041 case VCHIQ_BULK_MODE_BLOCKING:
/* Blocking mode: userdata is the caller's bulk_waiter to park on. */
3042 bulk_waiter = (struct bulk_waiter *)userdata;
3043 init_completion(&bulk_waiter->event);
3044 bulk_waiter->actual = 0;
3045 bulk_waiter->bulk = NULL;
/* WAITING mode resumes a previously-interrupted blocking transfer. */
3047 case VCHIQ_BULK_MODE_WAITING:
3048 bulk_waiter = (struct bulk_waiter *)userdata;
3049 bulk = bulk_waiter->bulk;
3055 state = service->state;
3057 queue = (dir == VCHIQ_BULK_TRANSMIT) ?
3058 &service->bulk_tx : &service->bulk_rx;
3060 if (mutex_lock_killable(&service->bulk_mutex) != 0) {
3061 status = VCHIQ_RETRY;
/* Bulk queue full: drop the mutex, wait for a slot, then re-take it. */
3065 if (queue->local_insert == queue->remove + VCHIQ_NUM_SERVICE_BULKS) {
3066 VCHIQ_SERVICE_STATS_INC(service, bulk_stalls);
3068 mutex_unlock(&service->bulk_mutex);
3069 if (wait_for_completion_killable(
3070 &service->bulk_remove_event)) {
3071 status = VCHIQ_RETRY;
3074 if (mutex_lock_killable(&service->bulk_mutex)
3076 status = VCHIQ_RETRY;
3079 } while (queue->local_insert == queue->remove +
3080 VCHIQ_NUM_SERVICE_BULKS);
3083 bulk = &queue->bulks[BULK_INDEX(queue->local_insert)];
3087 bulk->userdata = userdata;
/* Default to ABORTED until the transfer actually completes. */
3089 bulk->actual = VCHIQ_BULK_ACTUAL_ABORTED;
3091 if (vchiq_prepare_bulk_data(bulk, offset, size, dir) != VCHIQ_SUCCESS)
3092 goto unlock_error_exit;
3096 vchiq_log_info(vchiq_core_log_level,
3097 "%d: bt (%d->%d) %cx %x@%pK %pK",
3098 state->id, service->localport, service->remoteport, dir_char,
3099 size, bulk->data, userdata);
3101 /* The slot mutex must be held when the service is being closed, so
3102 claim it here to ensure that isn't happening */
3103 if (mutex_lock_killable(&state->slot_mutex) != 0) {
3104 status = VCHIQ_RETRY;
3105 goto cancel_bulk_error_exit;
3108 if (service->srvstate != VCHIQ_SRVSTATE_OPEN)
3109 goto unlock_both_error_exit;
/* Payload carries the bus address and length to the remote side. */
3111 payload[0] = (int)(long)bulk->data;
3112 payload[1] = bulk->size;
3113 status = queue_message(state,
3115 VCHIQ_MAKE_MSG(dir_msgtype,
3117 service->remoteport),
3118 memcpy_copy_callback,
3121 QMFLAGS_IS_BLOCKING |
3122 QMFLAGS_NO_MUTEX_LOCK |
3123 QMFLAGS_NO_MUTEX_UNLOCK);
3124 if (status != VCHIQ_SUCCESS) {
3125 goto unlock_both_error_exit;
3128 queue->local_insert++;
3130 mutex_unlock(&state->slot_mutex);
3131 mutex_unlock(&service->bulk_mutex);
3133 vchiq_log_trace(vchiq_core_log_level,
3134 "%d: bt:%d %cx li=%x ri=%x p=%x",
3136 service->localport, dir_char,
3137 queue->local_insert, queue->remote_insert, queue->process);
3140 unlock_service(service);
3142 status = VCHIQ_SUCCESS;
/* Blocking/waiting modes: park here until the bulk completes. */
3145 bulk_waiter->bulk = bulk;
3146 if (wait_for_completion_killable(&bulk_waiter->event))
3147 status = VCHIQ_RETRY;
3148 else if (bulk_waiter->actual == VCHIQ_BULK_ACTUAL_ABORTED)
3149 status = VCHIQ_ERROR;
/* Error unwind: release in the reverse order of acquisition. */
3154 unlock_both_error_exit:
3155 mutex_unlock(&state->slot_mutex);
3156 cancel_bulk_error_exit:
3157 vchiq_complete_bulk(bulk);
3159 mutex_unlock(&service->bulk_mutex);
3163 unlock_service(service);
/* Queue a data message on 'handle'.  The message body is produced by
 * 'copy_callback' (called with 'context') directly into the slot, up to
 * 'size' bytes.  Routes to the synchronous path for OPENSYNC services.
 * Returns VCHIQ_ERROR for invalid services, oversized messages, or
 * services in any state other than OPEN/OPENSYNC. */
3168 vchiq_queue_message(VCHIQ_SERVICE_HANDLE_T handle,
3169 ssize_t (*copy_callback)(void *context, void *dest,
3170 size_t offset, size_t maxsize),
3174 struct vchiq_service *service = find_service_by_handle(handle);
3175 VCHIQ_STATUS_T status = VCHIQ_ERROR;
3178 (vchiq_check_service(service) != VCHIQ_SUCCESS))
3182 VCHIQ_SERVICE_STATS_INC(service, error_count);
/* Reject anything larger than a slot can carry. */
3187 if (size > VCHIQ_MAX_MSG_SIZE) {
3188 VCHIQ_SERVICE_STATS_INC(service, error_count);
3192 switch (service->srvstate) {
3193 case VCHIQ_SRVSTATE_OPEN:
3194 status = queue_message(service->state, service,
3195 VCHIQ_MAKE_MSG(VCHIQ_MSG_DATA,
3197 service->remoteport),
3198 copy_callback, context, size, 1);
3200 case VCHIQ_SRVSTATE_OPENSYNC:
/* Synchronous services use the dedicated sync slot. */
3201 status = queue_message_sync(service->state, service,
3202 VCHIQ_MAKE_MSG(VCHIQ_MSG_DATA,
3204 service->remoteport),
3205 copy_callback, context, size, 1);
3208 status = VCHIQ_ERROR;
3214 unlock_service(service);
3220 vchiq_release_message(VCHIQ_SERVICE_HANDLE_T handle,
3221 struct vchiq_header *header)
3223 struct vchiq_service *service = find_service_by_handle(handle);
3224 struct vchiq_shared_state *remote;
3225 struct vchiq_state *state;
3231 state = service->state;
3232 remote = state->remote;
3234 slot_index = SLOT_INDEX_FROM_DATA(state, (void *)header);
3236 if ((slot_index >= remote->slot_first) &&
3237 (slot_index <= remote->slot_last)) {
3238 int msgid = header->msgid;
3240 if (msgid & VCHIQ_MSGID_CLAIMED) {
3241 struct vchiq_slot_info *slot_info =
3242 SLOT_INFO_FROM_INDEX(state, slot_index);
3244 release_slot(state, slot_info, header, service);
3246 } else if (slot_index == remote->slot_sync)
3247 release_message_sync(state, header);
3249 unlock_service(service);
3253 release_message_sync(struct vchiq_state *state, struct vchiq_header *header)
3255 header->msgid = VCHIQ_MSGID_PADDING;
3256 remote_event_signal(&state->remote->sync_release);
3260 vchiq_get_peer_version(VCHIQ_SERVICE_HANDLE_T handle, short *peer_version)
3262 VCHIQ_STATUS_T status = VCHIQ_ERROR;
3263 struct vchiq_service *service = find_service_by_handle(handle);
3266 (vchiq_check_service(service) != VCHIQ_SUCCESS) ||
3269 *peer_version = service->peer_version;
3270 status = VCHIQ_SUCCESS;
3274 unlock_service(service);
3278 void vchiq_get_config(struct vchiq_config *config)
3280 config->max_msg_size = VCHIQ_MAX_MSG_SIZE;
3281 config->bulk_threshold = VCHIQ_MAX_MSG_SIZE;
3282 config->max_outstanding_bulks = VCHIQ_NUM_SERVICE_BULKS;
3283 config->max_services = VCHIQ_MAX_SERVICES;
3284 config->version = VCHIQ_VERSION;
3285 config->version_min = VCHIQ_VERSION_MIN;
/* Set a per-service tuning option.  Quota options clamp/validate the
 * value against current usage; SYNCHRONOUS may only be changed before
 * the service is opened (HIDDEN/LISTENING).  Returns VCHIQ_SUCCESS on
 * acceptance, VCHIQ_ERROR otherwise. */
3289 vchiq_set_service_option(VCHIQ_SERVICE_HANDLE_T handle,
3290 VCHIQ_SERVICE_OPTION_T option, int value)
3292 struct vchiq_service *service = find_service_by_handle(handle);
3293 VCHIQ_STATUS_T status = VCHIQ_ERROR;
3297 case VCHIQ_SERVICE_OPTION_AUTOCLOSE:
3298 service->auto_close = value;
3299 status = VCHIQ_SUCCESS;
3302 case VCHIQ_SERVICE_OPTION_SLOT_QUOTA: {
3303 struct vchiq_service_quota *service_quota =
3304 &service->state->service_quotas[
3305 service->localport];
/* NOTE(review): a guard that substitutes the default when value == 0
 * appears to sit on a line not shown here — confirm in the full file. */
3307 value = service->state->default_slot_quota;
3308 if ((value >= service_quota->slot_use_count) &&
3309 (value < (unsigned short)~0)) {
3310 service_quota->slot_quota = value;
3311 if ((value >= service_quota->slot_use_count) &&
3312 (service_quota->message_quota >=
3313 service_quota->message_use_count)) {
3314 /* Signal the service that it may have
3315 ** dropped below its quota */
3316 complete(&service_quota->quota_event);
3318 status = VCHIQ_SUCCESS;
3322 case VCHIQ_SERVICE_OPTION_MESSAGE_QUOTA: {
3323 struct vchiq_service_quota *service_quota =
3324 &service->state->service_quotas[
3325 service->localport];
3327 value = service->state->default_message_quota;
3328 if ((value >= service_quota->message_use_count) &&
3329 (value < (unsigned short)~0)) {
3330 service_quota->message_quota = value;
3332 service_quota->message_use_count) &&
3333 (service_quota->slot_quota >=
3334 service_quota->slot_use_count))
3335 /* Signal the service that it may have
3336 ** dropped below its quota */
3337 complete(&service_quota->quota_event);
3338 status = VCHIQ_SUCCESS;
3342 case VCHIQ_SERVICE_OPTION_SYNCHRONOUS:
/* Only settable before the service has been opened. */
3343 if ((service->srvstate == VCHIQ_SRVSTATE_HIDDEN) ||
3344 (service->srvstate ==
3345 VCHIQ_SRVSTATE_LISTENING)) {
3346 service->sync = value;
3347 status = VCHIQ_SUCCESS;
3351 case VCHIQ_SERVICE_OPTION_TRACE:
3352 service->trace = value;
3353 status = VCHIQ_SUCCESS;
3359 unlock_service(service);
/* Dump one side's shared-state area (slot range, positions, claimed
 * slot use counts and debug counters) into the dump context.
 * NOTE(review): snprintf returns the would-be length on truncation, so
 * 'len' can exceed the buffer size and vchiq_dump(buf, len + 1) would
 * then overread — consider scnprintf, as later kernels do. */
3366 vchiq_dump_shared_state(void *dump_context, struct vchiq_state *state,
3367 struct vchiq_shared_state *shared, const char *label)
3369 static const char *const debug_names[] = {
3371 "SLOT_HANDLER_COUNT",
3372 "SLOT_HANDLER_LINE",
3376 "AWAIT_COMPLETION_LINE",
3377 "DEQUEUE_MESSAGE_LINE",
3378 "SERVICE_CALLBACK_LINE",
3379 "MSG_QUEUE_FULL_COUNT",
3380 "COMPLETION_QUEUE_FULL_COUNT"
3386 len = snprintf(buf, sizeof(buf),
3387 " %s: slots %d-%d tx_pos=%x recycle=%x",
3388 label, shared->slot_first, shared->slot_last,
3389 shared->tx_pos, shared->slot_queue_recycle);
3390 vchiq_dump(dump_context, buf, len + 1);
3392 len = snprintf(buf, sizeof(buf),
3394 vchiq_dump(dump_context, buf, len + 1);
/* List only slots whose use and release counts differ (still claimed). */
3396 for (i = shared->slot_first; i <= shared->slot_last; i++) {
3397 struct vchiq_slot_info slot_info =
3398 *SLOT_INFO_FROM_INDEX(state, i);
3399 if (slot_info.use_count != slot_info.release_count) {
3400 len = snprintf(buf, sizeof(buf),
3401 " %d: %d/%d", i, slot_info.use_count,
3402 slot_info.release_count);
3403 vchiq_dump(dump_context, buf, len + 1);
/* Entry 0 is the count of valid debug entries; start at 1. */
3407 for (i = 1; i < shared->debug[DEBUG_ENTRIES]; i++) {
3408 len = snprintf(buf, sizeof(buf), " DEBUG: %s = %d(%x)",
3409 debug_names[i], shared->debug[i], shared->debug[i]);
3410 vchiq_dump(dump_context, buf, len + 1);
/* Dump the whole connection state: connection status, tx/rx positions,
 * version, aggregate stats, slot accounting, both shared-state areas,
 * and every non-free service.
 * NOTE(review): as elsewhere, snprintf may report a truncated length
 * larger than the buffer; vchiq_dump(buf, len + 1) would then overread. */
3415 vchiq_dump_state(void *dump_context, struct vchiq_state *state)
3421 len = snprintf(buf, sizeof(buf), "State %d: %s", state->id,
3422 conn_state_names[state->conn_state]);
3423 vchiq_dump(dump_context, buf, len + 1);
3425 len = snprintf(buf, sizeof(buf),
3426 " tx_pos=%x(@%pK), rx_pos=%x(@%pK)",
3427 state->local->tx_pos,
3428 state->tx_data + (state->local_tx_pos & VCHIQ_SLOT_MASK),
3430 state->rx_data + (state->rx_pos & VCHIQ_SLOT_MASK));
3431 vchiq_dump(dump_context, buf, len + 1);
3433 len = snprintf(buf, sizeof(buf),
3434 " Version: %d (min %d)",
3435 VCHIQ_VERSION, VCHIQ_VERSION_MIN);
3436 vchiq_dump(dump_context, buf, len + 1);
3438 if (VCHIQ_ENABLE_STATS) {
3439 len = snprintf(buf, sizeof(buf),
3440 " Stats: ctrl_tx_count=%d, ctrl_rx_count=%d, "
3442 state->stats.ctrl_tx_count, state->stats.ctrl_rx_count,
3443 state->stats.error_count);
3444 vchiq_dump(dump_context, buf, len + 1);
3447 len = snprintf(buf, sizeof(buf),
3448 " Slots: %d available (%d data), %d recyclable, %d stalls "
/* Available slots derived from queue headroom minus the bytes already
 * consumed at the current local tx position. */
3450 ((state->slot_queue_available * VCHIQ_SLOT_SIZE) -
3451 state->local_tx_pos) / VCHIQ_SLOT_SIZE,
3452 state->data_quota - state->data_use_count,
3453 state->local->slot_queue_recycle - state->slot_queue_available,
3454 state->stats.slot_stalls, state->stats.data_stalls);
3455 vchiq_dump(dump_context, buf, len + 1);
3457 vchiq_dump_platform_state(dump_context);
3459 vchiq_dump_shared_state(dump_context, state, state->local, "Local");
3460 vchiq_dump_shared_state(dump_context, state, state->remote, "Remote");
3462 vchiq_dump_platform_instances(dump_context);
/* Walk every allocated service slot; find_service_by_port takes a
 * reference which must be dropped after dumping. */
3464 for (i = 0; i < state->unused_service; i++) {
3465 struct vchiq_service *service = find_service_by_port(state, i);
3468 vchiq_dump_service_state(dump_context, service);
3469 unlock_service(service);
/* Dump one service's state: identity, remote port, message/slot quota
 * usage, pending bulk transfers and (when enabled) per-service stats.
 * NOTE(review): 'len' accumulates snprintf return values, which on
 * truncation can exceed sizeof(buf) before vchiq_dump(buf, len + 1). */
3475 vchiq_dump_service_state(void *dump_context, struct vchiq_service *service)
3480 len = snprintf(buf, sizeof(buf), "Service %u: %s (ref %u)",
3481 service->localport, srvstate_names[service->srvstate],
3482 service->ref_count - 1); /*Don't include the lock just taken*/
3484 if (service->srvstate != VCHIQ_SRVSTATE_FREE) {
3485 char remoteport[30];
3486 struct vchiq_service_quota *service_quota =
3487 &service->state->service_quotas[service->localport];
3488 int fourcc = service->base.fourcc;
3489 int tx_pending, rx_pending;
/* Render the remote port, appending the client id for servers. */
3491 if (service->remoteport != VCHIQ_PORT_FREE) {
3492 int len2 = snprintf(remoteport, sizeof(remoteport),
3493 "%u", service->remoteport);
3495 if (service->public_fourcc != VCHIQ_FOURCC_INVALID)
3496 snprintf(remoteport + len2,
3497 sizeof(remoteport) - len2,
3498 " (client %x)", service->client_id);
3500 strcpy(remoteport, "n/a");
3502 len += snprintf(buf + len, sizeof(buf) - len,
3503 " '%c%c%c%c' remote %s (msg use %d/%d, slot use %d/%d)",
3504 VCHIQ_FOURCC_AS_4CHARS(fourcc),
3506 service_quota->message_use_count,
3507 service_quota->message_quota,
3508 service_quota->slot_use_count,
3509 service_quota->slot_quota);
3511 vchiq_dump(dump_context, buf, len + 1);
/* Pending = queued locally but not yet consumed by the remote. */
3513 tx_pending = service->bulk_tx.local_insert -
3514 service->bulk_tx.remote_insert;
3516 rx_pending = service->bulk_rx.local_insert -
3517 service->bulk_rx.remote_insert;
3519 len = snprintf(buf, sizeof(buf),
3520 " Bulk: tx_pending=%d (size %d),"
3521 " rx_pending=%d (size %d)",
3523 tx_pending ? service->bulk_tx.bulks[
3524 BULK_INDEX(service->bulk_tx.remove)].size : 0,
3526 rx_pending ? service->bulk_rx.bulks[
3527 BULK_INDEX(service->bulk_rx.remove)].size : 0);
3529 if (VCHIQ_ENABLE_STATS) {
3530 vchiq_dump(dump_context, buf, len + 1);
3532 len = snprintf(buf, sizeof(buf),
3533 " Ctrl: tx_count=%d, tx_bytes=%llu, "
3534 "rx_count=%d, rx_bytes=%llu",
3535 service->stats.ctrl_tx_count,
3536 service->stats.ctrl_tx_bytes,
3537 service->stats.ctrl_rx_count,
3538 service->stats.ctrl_rx_bytes);
3539 vchiq_dump(dump_context, buf, len + 1);
3541 len = snprintf(buf, sizeof(buf),
3542 " Bulk: tx_count=%d, tx_bytes=%llu, "
3543 "rx_count=%d, rx_bytes=%llu",
3544 service->stats.bulk_tx_count,
3545 service->stats.bulk_tx_bytes,
3546 service->stats.bulk_rx_count,
3547 service->stats.bulk_rx_bytes);
3548 vchiq_dump(dump_context, buf, len + 1);
3550 len = snprintf(buf, sizeof(buf),
3551 " %d quota stalls, %d slot stalls, "
3552 "%d bulk stalls, %d aborted, %d errors",
3553 service->stats.quota_stalls,
3554 service->stats.slot_stalls,
3555 service->stats.bulk_stalls,
3556 service->stats.bulk_aborted_count,
3557 service->stats.error_count);
3561 vchiq_dump(dump_context, buf, len + 1);
3563 if (service->srvstate != VCHIQ_SRVSTATE_FREE)
3564 vchiq_dump_platform_service_state(dump_context, service);
3568 vchiq_loud_error_header(void)
3570 vchiq_log_error(vchiq_core_log_level,
3571 "============================================================"
3572 "================");
3573 vchiq_log_error(vchiq_core_log_level,
3574 "============================================================"
3575 "================");
3576 vchiq_log_error(vchiq_core_log_level, "=====");
3580 vchiq_loud_error_footer(void)
3582 vchiq_log_error(vchiq_core_log_level, "=====");
3583 vchiq_log_error(vchiq_core_log_level,
3584 "============================================================"
3585 "================");
3586 vchiq_log_error(vchiq_core_log_level,
3587 "============================================================"
3588 "================");
3591 VCHIQ_STATUS_T vchiq_send_remote_use(struct vchiq_state *state)
3593 VCHIQ_STATUS_T status = VCHIQ_RETRY;
3595 if (state->conn_state != VCHIQ_CONNSTATE_DISCONNECTED)
3596 status = queue_message(state, NULL,
3597 VCHIQ_MAKE_MSG(VCHIQ_MSG_REMOTE_USE, 0, 0),
3602 VCHIQ_STATUS_T vchiq_send_remote_release(struct vchiq_state *state)
3604 VCHIQ_STATUS_T status = VCHIQ_RETRY;
3606 if (state->conn_state != VCHIQ_CONNSTATE_DISCONNECTED)
3607 status = queue_message(state, NULL,
3608 VCHIQ_MAKE_MSG(VCHIQ_MSG_REMOTE_RELEASE, 0, 0),
3613 VCHIQ_STATUS_T vchiq_send_remote_use_active(struct vchiq_state *state)
3615 VCHIQ_STATUS_T status = VCHIQ_RETRY;
3617 if (state->conn_state != VCHIQ_CONNSTATE_DISCONNECTED)
3618 status = queue_message(state, NULL,
3619 VCHIQ_MAKE_MSG(VCHIQ_MSG_REMOTE_USE_ACTIVE, 0, 0),
3624 void vchiq_log_dump_mem(const char *label, u32 addr, const void *void_mem,
3627 const u8 *mem = (const u8 *)void_mem;
3632 while (num_bytes > 0) {
3635 for (offset = 0; offset < 16; offset++) {
3636 if (offset < num_bytes)
3637 s += snprintf(s, 4, "%02x ", mem[offset]);
3639 s += snprintf(s, 4, " ");
3642 for (offset = 0; offset < 16; offset++) {
3643 if (offset < num_bytes) {
3644 u8 ch = mem[offset];
3646 if ((ch < ' ') || (ch > '~'))
3653 if ((label != NULL) && (*label != '\0'))
3654 vchiq_log_trace(VCHIQ_LOG_TRACE,
3655 "%s: %08x: %s", label, addr, line_buf);
3657 vchiq_log_trace(VCHIQ_LOG_TRACE,
3658 "%08x: %s", addr, line_buf);