// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (c) 2014 Raspberry Pi (Trading) Ltd. All rights reserved.
 * Copyright (c) 2010-2012 Broadcom. All rights reserved.
 */
7 #include <linux/kernel.h>
8 #include <linux/module.h>
9 #include <linux/sched/signal.h>
10 #include <linux/types.h>
11 #include <linux/errno.h>
12 #include <linux/cdev.h>
14 #include <linux/device.h>
15 #include <linux/device/bus.h>
17 #include <linux/highmem.h>
18 #include <linux/pagemap.h>
19 #include <linux/bug.h>
20 #include <linux/completion.h>
21 #include <linux/list.h>
23 #include <linux/platform_device.h>
24 #include <linux/compat.h>
25 #include <linux/dma-mapping.h>
26 #include <linux/rcupdate.h>
27 #include <linux/delay.h>
28 #include <linux/slab.h>
29 #include <linux/interrupt.h>
31 #include <linux/uaccess.h>
32 #include <soc/bcm2835/raspberrypi-firmware.h>
34 #include "vchiq_core.h"
35 #include "vchiq_ioctl.h"
36 #include "vchiq_arm.h"
37 #include "vchiq_bus.h"
38 #include "vchiq_debugfs.h"
39 #include "vchiq_connected.h"
40 #include "vchiq_pagelist.h"
42 #define DEVICE_NAME "vchiq"
44 #define TOTAL_SLOTS (VCHIQ_SLOT_ZERO_SLOTS + 2 * 32)
46 #define MAX_FRAGMENTS (VCHIQ_NUM_CURRENT_BULKS * 2)
48 #define VCHIQ_PLATFORM_FRAGMENTS_OFFSET_IDX 0
49 #define VCHIQ_PLATFORM_FRAGMENTS_COUNT_IDX 1
54 #define ARM_DS_ACTIVE BIT(2)
56 /* Override the default prefix, which would be vchiq_arm (from the filename) */
57 #undef MODULE_PARAM_PREFIX
58 #define MODULE_PARAM_PREFIX DEVICE_NAME "."
60 #define KEEPALIVE_VER 1
61 #define KEEPALIVE_VER_MIN KEEPALIVE_VER
63 /* Run time control of log level, based on KERN_XXX level. */
64 int vchiq_arm_log_level = VCHIQ_LOG_DEFAULT;
65 int vchiq_susp_log_level = VCHIQ_LOG_ERROR;
67 DEFINE_SPINLOCK(msg_queue_spinlock);
68 struct vchiq_state g_state;
/*
 * The devices implemented in the VCHIQ firmware are not discoverable,
 * so we need to maintain a list of them in order to register them with
 * the interface.
 */
75 static struct vchiq_device *bcm2835_audio;
76 static struct vchiq_device *bcm2835_camera;
/* Per-SoC configuration, selected via OF match data at probe time. */
struct vchiq_drvdata {
	/* L2 cache line size as understood by the VPU firmware */
	const unsigned int cache_line_size;
	/* Handle used to send the RPI_FIRMWARE_VCHIQ_INIT property */
	struct rpi_firmware *fw;
};
83 static struct vchiq_drvdata bcm2835_drvdata = {
84 .cache_line_size = 32,
87 static struct vchiq_drvdata bcm2836_drvdata = {
88 .cache_line_size = 64,
91 struct vchiq_arm_state {
92 /* Keepalive-related data */
93 struct task_struct *ka_thread;
94 struct completion ka_evt;
95 atomic_t ka_use_count;
96 atomic_t ka_use_ack_count;
97 atomic_t ka_release_count;
99 rwlock_t susp_res_lock;
101 struct vchiq_state *state;
104 * Global use count for videocore.
105 * This is equal to the sum of the use counts for all services. When
106 * this hits zero the videocore suspend procedure will be initiated.
108 int videocore_use_count;
111 * Use count to track requests from videocore peer.
112 * This use count is not associated with a service, so needs to be
113 * tracked separately with the state.
118 * Flag to indicate that the first vchiq connect has made it through.
119 * This means that both sides should be fully ready, and we should
120 * be able to suspend after this point.
125 struct vchiq_2835_state {
127 struct vchiq_arm_state arm_state;
130 struct vchiq_pagelist_info {
131 struct pagelist *pagelist;
132 size_t pagelist_buffer_size;
134 enum dma_data_direction dma_dir;
135 unsigned int num_pages;
136 unsigned int pages_need_release;
138 struct scatterlist *scatterlist;
139 unsigned int scatterlist_mapped;
142 static void __iomem *g_regs;
/*
 * This value is the size of the L2 cache lines as understood by the
 * VPU firmware, which determines the required alignment of the
 * offsets/sizes in pagelists.
 *
 * Modern VPU firmware looks for a DT "cache-line-size" property in
 * the VCHIQ node and will overwrite it with the actual L2 cache size,
 * which the kernel must then respect. That property was rejected
 * upstream, so we have to use the VPU firmware's compatibility value
 * of 32.
 */
153 static unsigned int g_cache_line_size = 32;
154 static unsigned int g_fragments_size;
155 static char *g_fragments_base;
156 static char *g_free_fragments;
157 static struct semaphore g_free_fragments_sema;
159 static DEFINE_SEMAPHORE(g_free_fragments_mutex, 1);
162 vchiq_blocking_bulk_transfer(struct vchiq_instance *instance, unsigned int handle, void *data,
163 unsigned int size, enum vchiq_bulk_dir dir);
166 vchiq_doorbell_irq(int irq, void *dev_id)
168 struct vchiq_state *state = dev_id;
169 irqreturn_t ret = IRQ_NONE;
172 /* Read (and clear) the doorbell */
173 status = readl(g_regs + BELL0);
175 if (status & ARM_DS_ACTIVE) { /* Was the doorbell rung? */
176 remote_event_pollall(state);
184 cleanup_pagelistinfo(struct vchiq_instance *instance, struct vchiq_pagelist_info *pagelistinfo)
186 if (pagelistinfo->scatterlist_mapped) {
187 dma_unmap_sg(instance->state->dev, pagelistinfo->scatterlist,
188 pagelistinfo->num_pages, pagelistinfo->dma_dir);
191 if (pagelistinfo->pages_need_release)
192 unpin_user_pages(pagelistinfo->pages, pagelistinfo->num_pages);
194 dma_free_coherent(instance->state->dev, pagelistinfo->pagelist_buffer_size,
195 pagelistinfo->pagelist, pagelistinfo->dma_addr);
199 is_adjacent_block(u32 *addrs, u32 addr, unsigned int k)
206 tmp = (addrs[k - 1] & PAGE_MASK) +
207 (((addrs[k - 1] & ~PAGE_MASK) + 1) << PAGE_SHIFT);
209 return tmp == (addr & PAGE_MASK);
/* There is a potential problem with partial cache lines (pages?)
 * at the ends of the block when reading. If the CPU accessed anything in
 * the same line (page?) then it may have pulled old data into the cache,
 * obscuring the new data underneath. We can solve this by transferring the
 * partial cache lines separately, and allowing the ARM to copy into the
 * cached area.
 */
220 static struct vchiq_pagelist_info *
221 create_pagelist(struct vchiq_instance *instance, char *buf, char __user *ubuf,
222 size_t count, unsigned short type)
224 struct pagelist *pagelist;
225 struct vchiq_pagelist_info *pagelistinfo;
228 unsigned int num_pages, offset, i, k;
230 size_t pagelist_size;
231 struct scatterlist *scatterlist, *sg;
235 if (count >= INT_MAX - PAGE_SIZE)
239 offset = (uintptr_t)buf & (PAGE_SIZE - 1);
241 offset = (uintptr_t)ubuf & (PAGE_SIZE - 1);
242 num_pages = DIV_ROUND_UP(count + offset, PAGE_SIZE);
244 if ((size_t)num_pages > (SIZE_MAX - sizeof(struct pagelist) -
245 sizeof(struct vchiq_pagelist_info)) /
246 (sizeof(u32) + sizeof(pages[0]) +
247 sizeof(struct scatterlist)))
250 pagelist_size = sizeof(struct pagelist) +
251 (num_pages * sizeof(u32)) +
252 (num_pages * sizeof(pages[0]) +
253 (num_pages * sizeof(struct scatterlist))) +
254 sizeof(struct vchiq_pagelist_info);
256 /* Allocate enough storage to hold the page pointers and the page
259 pagelist = dma_alloc_coherent(instance->state->dev, pagelist_size, &dma_addr,
262 vchiq_log_trace(vchiq_arm_log_level, "%s - %pK", __func__, pagelist);
267 addrs = pagelist->addrs;
268 pages = (struct page **)(addrs + num_pages);
269 scatterlist = (struct scatterlist *)(pages + num_pages);
270 pagelistinfo = (struct vchiq_pagelist_info *)
271 (scatterlist + num_pages);
273 pagelist->length = count;
274 pagelist->type = type;
275 pagelist->offset = offset;
277 /* Populate the fields of the pagelistinfo structure */
278 pagelistinfo->pagelist = pagelist;
279 pagelistinfo->pagelist_buffer_size = pagelist_size;
280 pagelistinfo->dma_addr = dma_addr;
281 pagelistinfo->dma_dir = (type == PAGELIST_WRITE) ?
282 DMA_TO_DEVICE : DMA_FROM_DEVICE;
283 pagelistinfo->num_pages = num_pages;
284 pagelistinfo->pages_need_release = 0;
285 pagelistinfo->pages = pages;
286 pagelistinfo->scatterlist = scatterlist;
287 pagelistinfo->scatterlist_mapped = 0;
290 unsigned long length = count;
291 unsigned int off = offset;
293 for (actual_pages = 0; actual_pages < num_pages;
296 vmalloc_to_page((buf +
297 (actual_pages * PAGE_SIZE)));
298 size_t bytes = PAGE_SIZE - off;
301 cleanup_pagelistinfo(instance, pagelistinfo);
307 pages[actual_pages] = pg;
311 /* do not try and release vmalloc pages */
313 actual_pages = pin_user_pages_fast((unsigned long)ubuf & PAGE_MASK, num_pages,
314 type == PAGELIST_READ, pages);
316 if (actual_pages != num_pages) {
317 vchiq_log_info(vchiq_arm_log_level,
318 "%s - only %d/%d pages locked",
319 __func__, actual_pages, num_pages);
321 /* This is probably due to the process being killed */
322 if (actual_pages > 0)
323 unpin_user_pages(pages, actual_pages);
324 cleanup_pagelistinfo(instance, pagelistinfo);
327 /* release user pages */
328 pagelistinfo->pages_need_release = 1;
332 * Initialize the scatterlist so that the magic cookie
333 * is filled if debugging is enabled
335 sg_init_table(scatterlist, num_pages);
336 /* Now set the pages for each scatterlist */
337 for (i = 0; i < num_pages; i++) {
338 unsigned int len = PAGE_SIZE - offset;
342 sg_set_page(scatterlist + i, pages[i], len, offset);
347 dma_buffers = dma_map_sg(instance->state->dev,
350 pagelistinfo->dma_dir);
352 if (dma_buffers == 0) {
353 cleanup_pagelistinfo(instance, pagelistinfo);
357 pagelistinfo->scatterlist_mapped = 1;
359 /* Combine adjacent blocks for performance */
361 for_each_sg(scatterlist, sg, dma_buffers, i) {
362 u32 len = sg_dma_len(sg);
363 u32 addr = sg_dma_address(sg);
365 /* Note: addrs is the address + page_count - 1
366 * The firmware expects blocks after the first to be page-
367 * aligned and a multiple of the page size
370 WARN_ON(i && (i != (dma_buffers - 1)) && (len & ~PAGE_MASK));
371 WARN_ON(i && (addr & ~PAGE_MASK));
372 if (is_adjacent_block(addrs, addr, k))
373 addrs[k - 1] += ((len + PAGE_SIZE - 1) >> PAGE_SHIFT);
375 addrs[k++] = (addr & PAGE_MASK) |
376 (((len + PAGE_SIZE - 1) >> PAGE_SHIFT) - 1);
379 /* Partial cache lines (fragments) require special measures */
380 if ((type == PAGELIST_READ) &&
381 ((pagelist->offset & (g_cache_line_size - 1)) ||
382 ((pagelist->offset + pagelist->length) &
383 (g_cache_line_size - 1)))) {
386 if (down_interruptible(&g_free_fragments_sema)) {
387 cleanup_pagelistinfo(instance, pagelistinfo);
391 WARN_ON(!g_free_fragments);
393 down(&g_free_fragments_mutex);
394 fragments = g_free_fragments;
396 g_free_fragments = *(char **)g_free_fragments;
397 up(&g_free_fragments_mutex);
398 pagelist->type = PAGELIST_READ_WITH_FRAGMENTS +
399 (fragments - g_fragments_base) / g_fragments_size;
406 free_pagelist(struct vchiq_instance *instance, struct vchiq_pagelist_info *pagelistinfo,
409 struct pagelist *pagelist = pagelistinfo->pagelist;
410 struct page **pages = pagelistinfo->pages;
411 unsigned int num_pages = pagelistinfo->num_pages;
413 vchiq_log_trace(vchiq_arm_log_level, "%s - %pK, %d",
414 __func__, pagelistinfo->pagelist, actual);
417 * NOTE: dma_unmap_sg must be called before the
418 * cpu can touch any of the data/pages.
420 dma_unmap_sg(instance->state->dev, pagelistinfo->scatterlist,
421 pagelistinfo->num_pages, pagelistinfo->dma_dir);
422 pagelistinfo->scatterlist_mapped = 0;
424 /* Deal with any partial cache lines (fragments) */
425 if (pagelist->type >= PAGELIST_READ_WITH_FRAGMENTS && g_fragments_base) {
426 char *fragments = g_fragments_base +
427 (pagelist->type - PAGELIST_READ_WITH_FRAGMENTS) *
429 int head_bytes, tail_bytes;
431 head_bytes = (g_cache_line_size - pagelist->offset) &
432 (g_cache_line_size - 1);
433 tail_bytes = (pagelist->offset + actual) &
434 (g_cache_line_size - 1);
436 if ((actual >= 0) && (head_bytes != 0)) {
437 if (head_bytes > actual)
440 memcpy_to_page(pages[0],
445 if ((actual >= 0) && (head_bytes < actual) &&
447 memcpy_to_page(pages[num_pages - 1],
448 (pagelist->offset + actual) &
449 (PAGE_SIZE - 1) & ~(g_cache_line_size - 1),
450 fragments + g_cache_line_size,
453 down(&g_free_fragments_mutex);
454 *(char **)fragments = g_free_fragments;
455 g_free_fragments = fragments;
456 up(&g_free_fragments_mutex);
457 up(&g_free_fragments_sema);
460 /* Need to mark all the pages dirty. */
461 if (pagelist->type != PAGELIST_WRITE &&
462 pagelistinfo->pages_need_release) {
465 for (i = 0; i < num_pages; i++)
466 set_page_dirty(pages[i]);
469 cleanup_pagelistinfo(instance, pagelistinfo);
472 static int vchiq_platform_init(struct platform_device *pdev, struct vchiq_state *state)
474 struct device *dev = &pdev->dev;
475 struct vchiq_drvdata *drvdata = platform_get_drvdata(pdev);
476 struct rpi_firmware *fw = drvdata->fw;
477 struct vchiq_slot_zero *vchiq_slot_zero;
479 dma_addr_t slot_phys;
481 int slot_mem_size, frag_mem_size;
485 * VCHI messages between the CPU and firmware use
486 * 32-bit bus addresses.
488 err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
493 g_cache_line_size = drvdata->cache_line_size;
494 g_fragments_size = 2 * g_cache_line_size;
496 /* Allocate space for the channels in coherent memory */
497 slot_mem_size = PAGE_ALIGN(TOTAL_SLOTS * VCHIQ_SLOT_SIZE);
498 frag_mem_size = PAGE_ALIGN(g_fragments_size * MAX_FRAGMENTS);
500 slot_mem = dmam_alloc_coherent(dev, slot_mem_size + frag_mem_size,
501 &slot_phys, GFP_KERNEL);
503 dev_err(dev, "could not allocate DMA memory\n");
507 WARN_ON(((unsigned long)slot_mem & (PAGE_SIZE - 1)) != 0);
509 vchiq_slot_zero = vchiq_init_slots(slot_mem, slot_mem_size);
510 if (!vchiq_slot_zero)
513 vchiq_slot_zero->platform_data[VCHIQ_PLATFORM_FRAGMENTS_OFFSET_IDX] =
514 (int)slot_phys + slot_mem_size;
515 vchiq_slot_zero->platform_data[VCHIQ_PLATFORM_FRAGMENTS_COUNT_IDX] =
518 g_fragments_base = (char *)slot_mem + slot_mem_size;
520 g_free_fragments = g_fragments_base;
521 for (i = 0; i < (MAX_FRAGMENTS - 1); i++) {
522 *(char **)&g_fragments_base[i * g_fragments_size] =
523 &g_fragments_base[(i + 1) * g_fragments_size];
525 *(char **)&g_fragments_base[i * g_fragments_size] = NULL;
526 sema_init(&g_free_fragments_sema, MAX_FRAGMENTS);
528 err = vchiq_init_state(state, vchiq_slot_zero, dev);
532 g_regs = devm_platform_ioremap_resource(pdev, 0);
534 return PTR_ERR(g_regs);
536 irq = platform_get_irq(pdev, 0);
540 err = devm_request_irq(dev, irq, vchiq_doorbell_irq, IRQF_IRQPOLL,
541 "VCHIQ doorbell", state);
543 dev_err(dev, "failed to register irq=%d\n", irq);
547 /* Send the base address of the slots to VideoCore */
548 channelbase = slot_phys;
549 err = rpi_firmware_property(fw, RPI_FIRMWARE_VCHIQ_INIT,
550 &channelbase, sizeof(channelbase));
552 dev_err(dev, "failed to send firmware property: %d\n", err);
557 dev_err(dev, "failed to set channelbase (response: %x)\n",
562 vchiq_log_info(vchiq_arm_log_level, "vchiq_init - done (slots %pK, phys %pad)",
563 vchiq_slot_zero, &slot_phys);
565 vchiq_call_connected_callbacks();
571 vchiq_arm_init_state(struct vchiq_state *state,
572 struct vchiq_arm_state *arm_state)
575 rwlock_init(&arm_state->susp_res_lock);
577 init_completion(&arm_state->ka_evt);
578 atomic_set(&arm_state->ka_use_count, 0);
579 atomic_set(&arm_state->ka_use_ack_count, 0);
580 atomic_set(&arm_state->ka_release_count, 0);
582 arm_state->state = state;
583 arm_state->first_connect = 0;
588 vchiq_platform_init_state(struct vchiq_state *state)
590 struct vchiq_2835_state *platform_state;
592 state->platform_state = kzalloc(sizeof(*platform_state), GFP_KERNEL);
593 if (!state->platform_state)
596 platform_state = (struct vchiq_2835_state *)state->platform_state;
598 platform_state->inited = 1;
599 vchiq_arm_init_state(state, &platform_state->arm_state);
604 static struct vchiq_arm_state *vchiq_platform_get_arm_state(struct vchiq_state *state)
606 struct vchiq_2835_state *platform_state;
608 platform_state = (struct vchiq_2835_state *)state->platform_state;
610 WARN_ON_ONCE(!platform_state->inited);
612 return &platform_state->arm_state;
616 remote_event_signal(struct remote_event *event)
619 * Ensure that all writes to shared data structures have completed
620 * before signalling the peer.
626 dsb(sy); /* data barrier operation */
629 writel(0, g_regs + BELL2); /* trigger vc interrupt */
633 vchiq_prepare_bulk_data(struct vchiq_instance *instance, struct vchiq_bulk *bulk, void *offset,
634 void __user *uoffset, int size, int dir)
636 struct vchiq_pagelist_info *pagelistinfo;
638 pagelistinfo = create_pagelist(instance, offset, uoffset, size,
639 (dir == VCHIQ_BULK_RECEIVE)
646 bulk->data = pagelistinfo->dma_addr;
649 * Store the pagelistinfo address in remote_data,
650 * which isn't used by the slave.
652 bulk->remote_data = pagelistinfo;
658 vchiq_complete_bulk(struct vchiq_instance *instance, struct vchiq_bulk *bulk)
660 if (bulk && bulk->remote_data && bulk->actual)
661 free_pagelist(instance, (struct vchiq_pagelist_info *)bulk->remote_data,
/* Emit a one-line platform identification into the dump context. */
int vchiq_dump_platform_state(void *dump_context)
{
	char buf[80];
	int len;

	len = snprintf(buf, sizeof(buf), "  Platform: 2835 (VC master)");
	return vchiq_dump(dump_context, buf, len + 1);
}
674 #define VCHIQ_INIT_RETRIES 10
675 int vchiq_initialise(struct vchiq_instance **instance_out)
677 struct vchiq_state *state;
678 struct vchiq_instance *instance = NULL;
682 * VideoCore may not be ready due to boot up timing.
683 * It may never be ready if kernel and firmware are mismatched,so don't
686 for (i = 0; i < VCHIQ_INIT_RETRIES; i++) {
687 state = vchiq_get_state();
690 usleep_range(500, 600);
692 if (i == VCHIQ_INIT_RETRIES) {
693 vchiq_log_error(vchiq_core_log_level, "%s: videocore not initialized\n", __func__);
697 vchiq_log_warning(vchiq_core_log_level,
698 "%s: videocore initialized after %d retries\n", __func__, i);
701 instance = kzalloc(sizeof(*instance), GFP_KERNEL);
703 vchiq_log_error(vchiq_core_log_level,
704 "%s: error allocating vchiq instance\n", __func__);
709 instance->connected = 0;
710 instance->state = state;
711 mutex_init(&instance->bulk_waiter_list_mutex);
712 INIT_LIST_HEAD(&instance->bulk_waiter_list);
714 *instance_out = instance;
719 vchiq_log_trace(vchiq_core_log_level, "%s(%p): returning %d", __func__, instance, ret);
723 EXPORT_SYMBOL(vchiq_initialise);
725 void free_bulk_waiter(struct vchiq_instance *instance)
727 struct bulk_waiter_node *waiter, *next;
729 list_for_each_entry_safe(waiter, next,
730 &instance->bulk_waiter_list, list) {
731 list_del(&waiter->list);
732 vchiq_log_info(vchiq_arm_log_level, "bulk_waiter - cleaned up %pK for pid %d",
733 waiter, waiter->pid);
738 int vchiq_shutdown(struct vchiq_instance *instance)
741 struct vchiq_state *state = instance->state;
743 if (mutex_lock_killable(&state->mutex))
746 /* Remove all services */
747 vchiq_shutdown_internal(state, instance);
749 mutex_unlock(&state->mutex);
751 vchiq_log_trace(vchiq_core_log_level, "%s(%p): returning %d", __func__, instance, status);
753 free_bulk_waiter(instance);
758 EXPORT_SYMBOL(vchiq_shutdown);
760 static int vchiq_is_connected(struct vchiq_instance *instance)
762 return instance->connected;
765 int vchiq_connect(struct vchiq_instance *instance)
768 struct vchiq_state *state = instance->state;
770 if (mutex_lock_killable(&state->mutex)) {
771 vchiq_log_trace(vchiq_core_log_level, "%s: call to mutex_lock failed", __func__);
775 status = vchiq_connect_internal(state, instance);
778 instance->connected = 1;
780 mutex_unlock(&state->mutex);
783 vchiq_log_trace(vchiq_core_log_level, "%s(%p): returning %d", __func__, instance, status);
787 EXPORT_SYMBOL(vchiq_connect);
790 vchiq_add_service(struct vchiq_instance *instance,
791 const struct vchiq_service_params_kernel *params,
792 unsigned int *phandle)
795 struct vchiq_state *state = instance->state;
796 struct vchiq_service *service = NULL;
799 *phandle = VCHIQ_SERVICE_HANDLE_INVALID;
801 srvstate = vchiq_is_connected(instance)
802 ? VCHIQ_SRVSTATE_LISTENING
803 : VCHIQ_SRVSTATE_HIDDEN;
805 service = vchiq_add_service_internal(state, params, srvstate, instance, NULL);
808 *phandle = service->handle;
814 vchiq_log_trace(vchiq_core_log_level, "%s(%p): returning %d", __func__, instance, status);
820 vchiq_open_service(struct vchiq_instance *instance,
821 const struct vchiq_service_params_kernel *params,
822 unsigned int *phandle)
824 int status = -EINVAL;
825 struct vchiq_state *state = instance->state;
826 struct vchiq_service *service = NULL;
828 *phandle = VCHIQ_SERVICE_HANDLE_INVALID;
830 if (!vchiq_is_connected(instance))
833 service = vchiq_add_service_internal(state, params, VCHIQ_SRVSTATE_OPENING, instance, NULL);
836 *phandle = service->handle;
837 status = vchiq_open_service_internal(service, current->pid);
839 vchiq_remove_service(instance, service->handle);
840 *phandle = VCHIQ_SERVICE_HANDLE_INVALID;
845 vchiq_log_trace(vchiq_core_log_level, "%s(%p): returning %d", __func__, instance, status);
849 EXPORT_SYMBOL(vchiq_open_service);
852 vchiq_bulk_transmit(struct vchiq_instance *instance, unsigned int handle, const void *data,
853 unsigned int size, void *userdata, enum vchiq_bulk_mode mode)
859 case VCHIQ_BULK_MODE_NOCALLBACK:
860 case VCHIQ_BULK_MODE_CALLBACK:
861 status = vchiq_bulk_transfer(instance, handle,
863 size, userdata, mode,
864 VCHIQ_BULK_TRANSMIT);
866 case VCHIQ_BULK_MODE_BLOCKING:
867 status = vchiq_blocking_bulk_transfer(instance, handle, (void *)data, size,
868 VCHIQ_BULK_TRANSMIT);
875 * vchiq_*_bulk_transfer() may return -EAGAIN, so we need
876 * to implement a retry mechanism since this function is
877 * supposed to block until queued
879 if (status != -EAGAIN)
887 EXPORT_SYMBOL(vchiq_bulk_transmit);
889 int vchiq_bulk_receive(struct vchiq_instance *instance, unsigned int handle,
890 void *data, unsigned int size, void *userdata,
891 enum vchiq_bulk_mode mode)
897 case VCHIQ_BULK_MODE_NOCALLBACK:
898 case VCHIQ_BULK_MODE_CALLBACK:
899 status = vchiq_bulk_transfer(instance, handle, data, NULL,
901 mode, VCHIQ_BULK_RECEIVE);
903 case VCHIQ_BULK_MODE_BLOCKING:
904 status = vchiq_blocking_bulk_transfer(instance, handle, (void *)data, size,
912 * vchiq_*_bulk_transfer() may return -EAGAIN, so we need
913 * to implement a retry mechanism since this function is
914 * supposed to block until queued
916 if (status != -EAGAIN)
924 EXPORT_SYMBOL(vchiq_bulk_receive);
927 vchiq_blocking_bulk_transfer(struct vchiq_instance *instance, unsigned int handle, void *data,
928 unsigned int size, enum vchiq_bulk_dir dir)
930 struct vchiq_service *service;
932 struct bulk_waiter_node *waiter = NULL, *iter;
934 service = find_service_by_handle(instance, handle);
938 vchiq_service_put(service);
940 mutex_lock(&instance->bulk_waiter_list_mutex);
941 list_for_each_entry(iter, &instance->bulk_waiter_list, list) {
942 if (iter->pid == current->pid) {
943 list_del(&iter->list);
948 mutex_unlock(&instance->bulk_waiter_list_mutex);
951 struct vchiq_bulk *bulk = waiter->bulk_waiter.bulk;
954 /* This thread has an outstanding bulk transfer. */
955 /* FIXME: why compare a dma address to a pointer? */
956 if ((bulk->data != (dma_addr_t)(uintptr_t)data) || (bulk->size != size)) {
958 * This is not a retry of the previous one.
959 * Cancel the signal when the transfer completes.
961 spin_lock(&bulk_waiter_spinlock);
962 bulk->userdata = NULL;
963 spin_unlock(&bulk_waiter_spinlock);
967 waiter = kzalloc(sizeof(*waiter), GFP_KERNEL);
969 vchiq_log_error(vchiq_core_log_level, "%s - out of memory", __func__);
974 status = vchiq_bulk_transfer(instance, handle, data, NULL, size,
975 &waiter->bulk_waiter,
976 VCHIQ_BULK_MODE_BLOCKING, dir);
977 if ((status != -EAGAIN) || fatal_signal_pending(current) || !waiter->bulk_waiter.bulk) {
978 struct vchiq_bulk *bulk = waiter->bulk_waiter.bulk;
981 /* Cancel the signal when the transfer completes. */
982 spin_lock(&bulk_waiter_spinlock);
983 bulk->userdata = NULL;
984 spin_unlock(&bulk_waiter_spinlock);
988 waiter->pid = current->pid;
989 mutex_lock(&instance->bulk_waiter_list_mutex);
990 list_add(&waiter->list, &instance->bulk_waiter_list);
991 mutex_unlock(&instance->bulk_waiter_list_mutex);
992 vchiq_log_info(vchiq_arm_log_level, "saved bulk_waiter %pK for pid %d", waiter,
1000 add_completion(struct vchiq_instance *instance, enum vchiq_reason reason,
1001 struct vchiq_header *header, struct user_service *user_service,
1002 void *bulk_userdata)
1004 struct vchiq_completion_data_kernel *completion;
1007 DEBUG_INITIALISE(g_state.local);
1009 insert = instance->completion_insert;
1010 while ((insert - instance->completion_remove) >= MAX_COMPLETIONS) {
1011 /* Out of space - wait for the client */
1012 DEBUG_TRACE(SERVICE_CALLBACK_LINE);
1013 vchiq_log_trace(vchiq_arm_log_level, "%s - completion queue full", __func__);
1014 DEBUG_COUNT(COMPLETION_QUEUE_FULL_COUNT);
1015 if (wait_for_completion_interruptible(&instance->remove_event)) {
1016 vchiq_log_info(vchiq_arm_log_level, "service_callback interrupted");
1018 } else if (instance->closing) {
1019 vchiq_log_info(vchiq_arm_log_level, "service_callback closing");
1022 DEBUG_TRACE(SERVICE_CALLBACK_LINE);
1025 completion = &instance->completions[insert & (MAX_COMPLETIONS - 1)];
1027 completion->header = header;
1028 completion->reason = reason;
1029 /* N.B. service_userdata is updated while processing AWAIT_COMPLETION */
1030 completion->service_userdata = user_service->service;
1031 completion->bulk_userdata = bulk_userdata;
1033 if (reason == VCHIQ_SERVICE_CLOSED) {
1035 * Take an extra reference, to be held until
1036 * this CLOSED notification is delivered.
1038 vchiq_service_get(user_service->service);
1039 if (instance->use_close_delivered)
1040 user_service->close_pending = 1;
1044 * A write barrier is needed here to ensure that the entire completion
1045 * record is written out before the insert point.
1049 if (reason == VCHIQ_MESSAGE_AVAILABLE)
1050 user_service->message_available_pos = insert;
1053 instance->completion_insert = insert;
1055 complete(&instance->insert_event);
1061 service_callback(struct vchiq_instance *instance, enum vchiq_reason reason,
1062 struct vchiq_header *header, unsigned int handle, void *bulk_userdata)
1065 * How do we ensure the callback goes to the right client?
1066 * The service_user data points to a user_service record
1067 * containing the original callback and the user state structure, which
1068 * contains a circular buffer for completion records.
1070 struct user_service *user_service;
1071 struct vchiq_service *service;
1072 bool skip_completion = false;
1074 DEBUG_INITIALISE(g_state.local);
1076 DEBUG_TRACE(SERVICE_CALLBACK_LINE);
1079 service = handle_to_service(instance, handle);
1080 if (WARN_ON(!service)) {
1085 user_service = (struct user_service *)service->base.userdata;
1087 if (!instance || instance->closing) {
1093 * As hopping around different synchronization mechanism,
1094 * taking an extra reference results in simpler implementation.
1096 vchiq_service_get(service);
1099 vchiq_log_trace(vchiq_arm_log_level,
1100 "%s - service %lx(%d,%p), reason %d, header %lx, instance %lx, bulk_userdata %lx",
1101 __func__, (unsigned long)user_service, service->localport,
1102 user_service->userdata, reason, (unsigned long)header,
1103 (unsigned long)instance, (unsigned long)bulk_userdata);
1105 if (header && user_service->is_vchi) {
1106 spin_lock(&msg_queue_spinlock);
1107 while (user_service->msg_insert ==
1108 (user_service->msg_remove + MSG_QUEUE_SIZE)) {
1109 spin_unlock(&msg_queue_spinlock);
1110 DEBUG_TRACE(SERVICE_CALLBACK_LINE);
1111 DEBUG_COUNT(MSG_QUEUE_FULL_COUNT);
1112 vchiq_log_trace(vchiq_arm_log_level, "%s - msg queue full", __func__);
1114 * If there is no MESSAGE_AVAILABLE in the completion
1117 if ((user_service->message_available_pos -
1118 instance->completion_remove) < 0) {
1121 vchiq_log_info(vchiq_arm_log_level,
1122 "Inserting extra MESSAGE_AVAILABLE");
1123 DEBUG_TRACE(SERVICE_CALLBACK_LINE);
1124 status = add_completion(instance, reason, NULL, user_service,
1127 DEBUG_TRACE(SERVICE_CALLBACK_LINE);
1128 vchiq_service_put(service);
1133 DEBUG_TRACE(SERVICE_CALLBACK_LINE);
1134 if (wait_for_completion_interruptible(&user_service->remove_event)) {
1135 vchiq_log_info(vchiq_arm_log_level, "%s interrupted", __func__);
1136 DEBUG_TRACE(SERVICE_CALLBACK_LINE);
1137 vchiq_service_put(service);
1139 } else if (instance->closing) {
1140 vchiq_log_info(vchiq_arm_log_level, "%s closing", __func__);
1141 DEBUG_TRACE(SERVICE_CALLBACK_LINE);
1142 vchiq_service_put(service);
1145 DEBUG_TRACE(SERVICE_CALLBACK_LINE);
1146 spin_lock(&msg_queue_spinlock);
1149 user_service->msg_queue[user_service->msg_insert &
1150 (MSG_QUEUE_SIZE - 1)] = header;
1151 user_service->msg_insert++;
1154 * If there is a thread waiting in DEQUEUE_MESSAGE, or if
1155 * there is a MESSAGE_AVAILABLE in the completion queue then
1156 * bypass the completion queue.
1158 if (((user_service->message_available_pos -
1159 instance->completion_remove) >= 0) ||
1160 user_service->dequeue_pending) {
1161 user_service->dequeue_pending = 0;
1162 skip_completion = true;
1165 spin_unlock(&msg_queue_spinlock);
1166 complete(&user_service->insert_event);
1170 DEBUG_TRACE(SERVICE_CALLBACK_LINE);
1171 vchiq_service_put(service);
1173 if (skip_completion)
1176 return add_completion(instance, reason, header, user_service,
1180 int vchiq_dump(void *dump_context, const char *str, int len)
1182 struct dump_context *context = (struct dump_context *)dump_context;
1185 if (context->actual >= context->space)
1188 if (context->offset > 0) {
1189 int skip_bytes = min_t(int, len, context->offset);
1193 context->offset -= skip_bytes;
1194 if (context->offset > 0)
1197 copy_bytes = min_t(int, len, context->space - context->actual);
1198 if (copy_bytes == 0)
1200 if (copy_to_user(context->buf + context->actual, str,
1203 context->actual += copy_bytes;
1207 * If the terminating NUL is included in the length, then it
1208 * marks the end of a line and should be replaced with a
1211 if ((len == 0) && (str[copy_bytes - 1] == '\0')) {
1214 if (copy_to_user(context->buf + context->actual - 1,
1221 int vchiq_dump_platform_instances(void *dump_context)
1223 struct vchiq_state *state = vchiq_get_state();
1232 * There is no list of instances, so instead scan all services,
1233 * marking those that have been dumped.
1237 for (i = 0; i < state->unused_service; i++) {
1238 struct vchiq_service *service;
1239 struct vchiq_instance *instance;
1241 service = rcu_dereference(state->services[i]);
1242 if (!service || service->base.callback != service_callback)
1245 instance = service->instance;
1251 for (i = 0; i < state->unused_service; i++) {
1252 struct vchiq_service *service;
1253 struct vchiq_instance *instance;
1257 service = rcu_dereference(state->services[i]);
1258 if (!service || service->base.callback != service_callback) {
1263 instance = service->instance;
1264 if (!instance || instance->mark) {
1270 len = snprintf(buf, sizeof(buf),
1271 "Instance %pK: pid %d,%s completions %d/%d",
1272 instance, instance->pid,
1273 instance->connected ? " connected, " :
1275 instance->completion_insert -
1276 instance->completion_remove,
1278 err = vchiq_dump(dump_context, buf, len + 1);
1286 int vchiq_dump_platform_service_state(void *dump_context,
1287 struct vchiq_service *service)
1289 struct user_service *user_service =
1290 (struct user_service *)service->base.userdata;
1294 len = scnprintf(buf, sizeof(buf), " instance %pK", service->instance);
1296 if ((service->base.callback == service_callback) && user_service->is_vchi) {
1297 len += scnprintf(buf + len, sizeof(buf) - len, ", %d/%d messages",
1298 user_service->msg_insert - user_service->msg_remove,
1301 if (user_service->dequeue_pending)
1302 len += scnprintf(buf + len, sizeof(buf) - len,
1303 " (dequeue pending)");
1306 return vchiq_dump(dump_context, buf, len + 1);
1309 struct vchiq_state *
1310 vchiq_get_state(void)
1312 if (!g_state.remote) {
1313 pr_err("%s: g_state.remote == NULL\n", __func__);
1317 if (g_state.remote->initialised != 1) {
1318 pr_notice("%s: g_state.remote->initialised != 1 (%d)\n",
1319 __func__, g_state.remote->initialised);
1327 * Autosuspend related functionality
1331 vchiq_keepalive_vchiq_callback(struct vchiq_instance *instance,
1332 enum vchiq_reason reason,
1333 struct vchiq_header *header,
1334 unsigned int service_user, void *bulk_user)
1336 vchiq_log_error(vchiq_susp_log_level, "%s callback reason %d", __func__, reason);
/*
 * vchiq_keepalive_thread_func() - kernel thread that services remote
 * use/release requests from process context.
 *
 * vchiq_on_remote_use()/vchiq_on_remote_release() bump ka_use_count /
 * ka_release_count and complete ka_evt; this thread wakes up, drains both
 * counters, and issues the matching number of vchiq_use_service() and
 * vchiq_release_service() calls on a private "KEEP" service.
 *
 * NOTE(review): several original lines (error-branch bodies, loop
 * structure, returns) are missing from this extract.
 */
1341 vchiq_keepalive_thread_func(void *v)
1343 struct vchiq_state *state = (struct vchiq_state *)v;
1344 struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);
1347 struct vchiq_instance *instance;
1348 unsigned int ka_handle;
1351 struct vchiq_service_params_kernel params = {
1352 .fourcc = VCHIQ_MAKE_FOURCC('K', 'E', 'E', 'P'),
1353 .callback = vchiq_keepalive_vchiq_callback,
1354 .version = KEEPALIVE_VER,
1355 .version_min = KEEPALIVE_VER_MIN
/* Bring up a private vchiq connection and open the keepalive service. */
1358 ret = vchiq_initialise(&instance);
1360 vchiq_log_error(vchiq_susp_log_level, "%s vchiq_initialise failed %d", __func__,
1365 status = vchiq_connect(instance);
1367 vchiq_log_error(vchiq_susp_log_level, "%s vchiq_connect failed %d", __func__,
/* NOTE(review): "¶ms" is mojibake for "&params" — fix in the real file. */
1372 status = vchiq_add_service(instance, ¶ms, &ka_handle);
1374 vchiq_log_error(vchiq_susp_log_level, "%s vchiq_open_service failed %d", __func__,
1380 long rc = 0, uc = 0;
/* Block until vchiq_on_remote_use()/release() signal new work. */
1382 if (wait_for_completion_interruptible(&arm_state->ka_evt)) {
1383 vchiq_log_error(vchiq_susp_log_level, "%s interrupted", __func__);
1384 flush_signals(current);
1389 * read and clear counters. Do release_count then use_count to
1390 * prevent getting more releases than uses
1392 rc = atomic_xchg(&arm_state->ka_release_count, 0);
1393 uc = atomic_xchg(&arm_state->ka_use_count, 0);
1396 * Call use/release service the requisite number of times.
1397 * Process use before release so use counts don't go negative
/* ka_use_ack_count is drained by vchiq_use_internal() to ack the uses. */
1400 atomic_inc(&arm_state->ka_use_ack_count);
1401 status = vchiq_use_service(instance, ka_handle);
1403 vchiq_log_error(vchiq_susp_log_level,
1404 "%s vchiq_use_service error %d", __func__, status);
1408 status = vchiq_release_service(instance, ka_handle);
1410 vchiq_log_error(vchiq_susp_log_level,
1411 "%s vchiq_release_service error %d", __func__,
/* Common error exit: tear down the private connection. */
1418 vchiq_shutdown(instance);
/*
 * vchiq_use_internal() - take a VideoCore use-count on behalf of either
 * a service (USE_TYPE_SERVICE) or the VCHIQ core itself (USE_TYPE_VCHIQ).
 * Bumps the global videocore_use_count and the relevant per-entity
 * counter under susp_res_lock, then acknowledges any outstanding
 * keepalive "use" requests by notifying VideoCore that the ARM side is
 * active.
 *
 * NOTE(review): local declarations (entity/entity_uc/local_uc/ret/status)
 * and several branch closers are missing from this extract.
 */
1424 vchiq_use_internal(struct vchiq_state *state, struct vchiq_service *service,
1425 enum USE_TYPE_E use_type)
1427 struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);
/* Build a short label ("VCHIQ: " or "fourcc:client_id") for trace output. */
1438 if (use_type == USE_TYPE_VCHIQ) {
1439 sprintf(entity, "VCHIQ: ");
1440 entity_uc = &arm_state->peer_use_count;
1441 } else if (service) {
1442 sprintf(entity, "%c%c%c%c:%03d",
1443 VCHIQ_FOURCC_AS_4CHARS(service->base.fourcc),
1444 service->client_id);
1445 entity_uc = &service->service_use_count;
/* Neither VCHIQ use nor a valid service — caller bug. */
1447 vchiq_log_error(vchiq_susp_log_level, "%s null service ptr", __func__);
/* Update global and per-entity counters atomically w.r.t. readers. */
1452 write_lock_bh(&arm_state->susp_res_lock);
1453 local_uc = ++arm_state->videocore_use_count;
1456 vchiq_log_trace(vchiq_susp_log_level, "%s %s count %d, state count %d", __func__, entity,
1457 *entity_uc, local_uc);
1459 write_unlock_bh(&arm_state->susp_res_lock);
/*
 * Ack pending keepalive "use" requests; on send failure the count is
 * put back so the ack is retried on a later call.
 */
1463 long ack_cnt = atomic_xchg(&arm_state->ka_use_ack_count, 0);
1465 while (ack_cnt && !status) {
1466 /* Send the use notify to videocore */
1467 status = vchiq_send_remote_use_active(state);
1471 atomic_add(ack_cnt, &arm_state->ka_use_ack_count);
1476 vchiq_log_trace(vchiq_susp_log_level, "%s exit %d", __func__, ret);
/*
 * vchiq_release_internal() - drop a VideoCore use-count previously taken
 * by vchiq_use_internal(), for either a service or the peer ("PEER")
 * entity.  Underflow is reported with WARN_ON rather than BUG_ON so a
 * misbehaving user thread cannot crash the kernel.
 *
 * NOTE(review): local declarations and some branch closers are missing
 * from this extract.
 */
1481 vchiq_release_internal(struct vchiq_state *state, struct vchiq_service *service)
1483 struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);
/* Label and counter selection mirrors vchiq_use_internal(). */
1494 sprintf(entity, "%c%c%c%c:%03d",
1495 VCHIQ_FOURCC_AS_4CHARS(service->base.fourcc),
1496 service->client_id);
1497 entity_uc = &service->service_use_count;
1499 sprintf(entity, "PEER: ");
1500 entity_uc = &arm_state->peer_use_count;
1503 write_lock_bh(&arm_state->susp_res_lock);
/* Refuse to decrement below zero; flag the imbalance instead. */
1504 if (!arm_state->videocore_use_count || !(*entity_uc)) {
1505 /* Don't use BUG_ON - don't allow user thread to crash kernel */
1506 WARN_ON(!arm_state->videocore_use_count);
1507 WARN_ON(!(*entity_uc));
1511 --arm_state->videocore_use_count;
1514 vchiq_log_trace(vchiq_susp_log_level, "%s %s count %d, state count %d", __func__, entity,
1515 *entity_uc, arm_state->videocore_use_count);
1518 write_unlock_bh(&arm_state->susp_res_lock);
1521 vchiq_log_trace(vchiq_susp_log_level, "%s exit %d", __func__, ret);
1526 vchiq_on_remote_use(struct vchiq_state *state)
1528 struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);
1530 atomic_inc(&arm_state->ka_use_count);
1531 complete(&arm_state->ka_evt);
1535 vchiq_on_remote_release(struct vchiq_state *state)
1537 struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);
1539 atomic_inc(&arm_state->ka_release_count);
1540 complete(&arm_state->ka_evt);
1544 vchiq_use_service_internal(struct vchiq_service *service)
1546 return vchiq_use_internal(service->state, service, USE_TYPE_SERVICE);
1550 vchiq_release_service_internal(struct vchiq_service *service)
1552 return vchiq_release_internal(service->state, service);
1555 struct vchiq_debugfs_node *
1556 vchiq_instance_get_debugfs_node(struct vchiq_instance *instance)
1558 return &instance->debugfs_node;
/*
 * vchiq_instance_get_use_count() - sum service_use_count over every
 * service belonging to @instance (debugfs support).  The iterator
 * initialisation and the RCU lock/unlock around the walk are not
 * visible in this extract.
 */
1562 vchiq_instance_get_use_count(struct vchiq_instance *instance)
1564 struct vchiq_service *service;
1565 int use_count = 0, i;
1569 while ((service = __next_service_by_instance(instance->state,
1571 use_count += service->service_use_count;
1577 vchiq_instance_get_pid(struct vchiq_instance *instance)
1579 return instance->pid;
1583 vchiq_instance_get_trace(struct vchiq_instance *instance)
1585 return instance->trace;
/*
 * vchiq_instance_set_trace() - propagate the trace flag to every service
 * owned by @instance, then record it on the instance itself.  (Iterator
 * setup around __next_service_by_instance() is not visible here.)
 */
1589 vchiq_instance_set_trace(struct vchiq_instance *instance, int trace)
1591 struct vchiq_service *service;
1596 while ((service = __next_service_by_instance(instance->state,
1598 service->trace = trace;
/* Normalise any non-zero value to boolean 0/1. */
1600 instance->trace = (trace != 0);
/*
 * vchiq_use_service() - public API: look up the service for @handle and
 * take a VideoCore use-count on it.  The NULL-check branch and return
 * statement are not visible in this extract.
 */
1604 vchiq_use_service(struct vchiq_instance *instance, unsigned int handle)
1607 struct vchiq_service *service = find_service_by_handle(instance, handle);
1610 ret = vchiq_use_internal(service->state, service, USE_TYPE_SERVICE);
/* Drop the lookup reference taken by find_service_by_handle(). */
1611 vchiq_service_put(service);
1615 EXPORT_SYMBOL(vchiq_use_service);
/*
 * vchiq_release_service() - public API: look up the service for @handle
 * and drop a previously-taken VideoCore use-count.  The NULL-check
 * branch and return statement are not visible in this extract.
 */
1618 vchiq_release_service(struct vchiq_instance *instance, unsigned int handle)
1621 struct vchiq_service *service = find_service_by_handle(instance, handle);
1624 ret = vchiq_release_internal(service->state, service);
/* Drop the lookup reference taken by find_service_by_handle(). */
1625 vchiq_service_put(service);
1629 EXPORT_SYMBOL(vchiq_release_service);
/*
 * Per-service snapshot (fourcc, clientid, use_count — per the accesses in
 * vchiq_dump_service_use_state()) captured under susp_res_lock so logging
 * can happen after the lock is dropped.  Field list not visible here.
 */
1631 struct service_data_struct {
/*
 * vchiq_dump_service_use_state() - log the use-counts keeping VideoCore
 * awake: one line per service plus the peer and overall counts.
 *
 * Counters are snapshotted into a kmalloc'd array under susp_res_lock
 * (logging happens after unlock to keep the critical section short).
 *
 * NOTE(review): several original lines (declarations of i/found/
 * peer_count/vc_use_count, allocation-failure check, loop closers,
 * only_nonzero assignment) are missing from this extract.
 */
1638 vchiq_dump_service_use_state(struct vchiq_state *state)
1640 struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);
1641 struct service_data_struct *service_data;
1644 * If there's more than 64 services, only dump ones with
1647 int only_nonzero = 0;
1648 static const char *nz = "<-- preventing suspend";
1652 int active_services;
/* Snapshot buffer so the log calls can run outside the lock. */
1657 service_data = kmalloc_array(MAX_SERVICES, sizeof(*service_data),
1662 read_lock_bh(&arm_state->susp_res_lock);
1663 peer_count = arm_state->peer_use_count;
1664 vc_use_count = arm_state->videocore_use_count;
1665 active_services = state->unused_service;
/* Too many services: restrict the dump to non-zero use-counts. */
1666 if (active_services > MAX_SERVICES)
1670 for (i = 0; i < active_services; i++) {
1671 struct vchiq_service *service_ptr =
1672 rcu_dereference(state->services[i]);
1677 if (only_nonzero && !service_ptr->service_use_count)
1680 if (service_ptr->srvstate == VCHIQ_SRVSTATE_FREE)
1683 service_data[found].fourcc = service_ptr->base.fourcc;
1684 service_data[found].clientid = service_ptr->client_id;
1685 service_data[found].use_count = service_ptr->service_use_count;
1687 if (found >= MAX_SERVICES)
1692 read_unlock_bh(&arm_state->susp_res_lock);
1695 vchiq_log_warning(vchiq_susp_log_level, "Too many active services (%d). Only dumping up to first %d services with non-zero use-count",
1696 active_services, found);
/* Emit the snapshot now that the lock is released. */
1698 for (i = 0; i < found; i++) {
1699 vchiq_log_warning(vchiq_susp_log_level, "----- %c%c%c%c:%d service count %d %s",
1700 VCHIQ_FOURCC_AS_4CHARS(service_data[i].fourcc),
1701 service_data[i].clientid, service_data[i].use_count,
1702 service_data[i].use_count ? nz : "");
1704 vchiq_log_warning(vchiq_susp_log_level, "----- VCHIQ use count %d", peer_count);
1705 vchiq_log_warning(vchiq_susp_log_level, "--- Overall vchiq instance use count %d",
1708 kfree(service_data);
/*
 * vchiq_check_service() - verify that @service currently holds a
 * use-count; if not, log an error with the relevant counters and dump the
 * full use-count state for debugging.  Return value and early-exit lines
 * are not visible in this extract.
 */
1712 vchiq_check_service(struct vchiq_service *service)
1714 struct vchiq_arm_state *arm_state;
/* Defensive: callers may pass a stale/NULL service. */
1717 if (!service || !service->state)
1720 arm_state = vchiq_platform_get_arm_state(service->state);
1722 read_lock_bh(&arm_state->susp_res_lock);
1723 if (service->service_use_count)
1725 read_unlock_bh(&arm_state->susp_res_lock);
/* No use-count held — report and dump state for diagnosis. */
1728 vchiq_log_error(vchiq_susp_log_level,
1729 "%s ERROR - %c%c%c%c:%d service count %d, state count %d", __func__,
1730 VCHIQ_FOURCC_AS_4CHARS(service->base.fourcc), service->client_id,
1731 service->service_use_count, arm_state->videocore_use_count);
1732 vchiq_dump_service_use_state(service->state);
/*
 * vchiq_platform_conn_state_changed() - connection-state hook.  On the
 * very first transition to CONNECTED it spawns the keepalive thread
 * (exactly once, guarded by arm_state->first_connect under
 * susp_res_lock).  Some original lines (early returns, kthread_create
 * arguments, error-branch closers) are missing from this extract.
 */
1738 void vchiq_platform_conn_state_changed(struct vchiq_state *state,
1739 enum vchiq_connstate oldstate,
1740 enum vchiq_connstate newstate)
1742 struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);
1743 char threadname[16];
1745 vchiq_log_info(vchiq_susp_log_level, "%d: %s->%s", state->id,
1746 get_conn_state_name(oldstate), get_conn_state_name(newstate));
/* Only act when the state machine has actually reached CONNECTED. */
1747 if (state->conn_state != VCHIQ_CONNSTATE_CONNECTED)
1750 write_lock_bh(&arm_state->susp_res_lock);
/* Keepalive thread is created only on the first ever connect. */
1751 if (arm_state->first_connect) {
1752 write_unlock_bh(&arm_state->susp_res_lock);
1756 arm_state->first_connect = 1;
1757 write_unlock_bh(&arm_state->susp_res_lock);
1758 snprintf(threadname, sizeof(threadname), "vchiq-keep/%d",
1760 arm_state->ka_thread = kthread_create(&vchiq_keepalive_thread_func,
1763 if (IS_ERR(arm_state->ka_thread)) {
1764 vchiq_log_error(vchiq_susp_log_level,
1765 "vchiq: FATAL: couldn't create thread %s",
1768 wake_up_process(arm_state->ka_thread);
/*
 * Devicetree match table; .data points at the per-SoC vchiq_drvdata used
 * by vchiq_probe().  (The sentinel entry and closing brace are not
 * visible in this extract.)
 */
1772 static const struct of_device_id vchiq_of_match[] = {
1773 { .compatible = "brcm,bcm2835-vchiq", .data = &bcm2835_drvdata },
1774 { .compatible = "brcm,bcm2836-vchiq", .data = &bcm2836_drvdata },
1777 MODULE_DEVICE_TABLE(of, vchiq_of_match);
/*
 * vchiq_probe() - platform-device probe: resolve per-SoC drvdata, obtain
 * the RPi firmware interface (deferring until it appears), initialise
 * the platform layer and global state, then create debugfs entries, the
 * /dev/vchiq character device, and the child audio/camera devices.
 *
 * NOTE(review): some original lines (err declaration, NULL checks,
 * returns) are missing from this extract.
 */
1779 static int vchiq_probe(struct platform_device *pdev)
1781 struct device_node *fw_node;
1782 const struct of_device_id *of_id;
1783 struct vchiq_drvdata *drvdata;
1786 of_id = of_match_node(vchiq_of_match, pdev->dev.of_node);
1787 drvdata = (struct vchiq_drvdata *)of_id->data;
1791 fw_node = of_find_compatible_node(NULL, NULL,
1792 "raspberrypi,bcm2835-firmware");
1794 dev_err(&pdev->dev, "Missing firmware node\n");
/* devm-managed: the firmware reference is released with the device. */
1798 drvdata->fw = devm_rpi_firmware_get(&pdev->dev, fw_node);
1799 of_node_put(fw_node);
/* Firmware driver not bound yet — retry probe later. */
1801 return -EPROBE_DEFER;
1803 platform_set_drvdata(pdev, drvdata);
1805 err = vchiq_platform_init(pdev, &g_state);
1807 goto failed_platform_init;
1809 vchiq_debugfs_init();
1811 vchiq_log_info(vchiq_arm_log_level,
1812 "vchiq: platform initialised - version %d (min %d)",
1813 VCHIQ_VERSION, VCHIQ_VERSION_MIN);
1816 * Simply exit on error since the function handles cleanup in
/* chrdev failure is non-fatal: kernel clients can still use vchiq. */
1819 err = vchiq_register_chrdev(&pdev->dev);
1821 vchiq_log_warning(vchiq_arm_log_level,
1822 "Failed to initialize vchiq cdev");
1826 bcm2835_audio = vchiq_device_register(&pdev->dev, "bcm2835-audio");
1827 bcm2835_camera = vchiq_device_register(&pdev->dev, "bcm2835-camera");
1831 failed_platform_init:
1832 vchiq_log_warning(vchiq_arm_log_level, "could not initialize vchiq platform");
1837 static void vchiq_remove(struct platform_device *pdev)
1839 vchiq_device_unregister(bcm2835_audio);
1840 vchiq_device_unregister(bcm2835_camera);
1841 vchiq_debugfs_deinit();
1842 vchiq_deregister_chrdev();
/*
 * Platform-driver descriptor; .remove_new is the void-returning remove
 * callback variant.  (The .driver sub-struct braces and the closing
 * brace are not visible in this extract.)
 */
1845 static struct platform_driver vchiq_driver = {
1847 .name = "bcm2835_vchiq",
1848 .of_match_table = vchiq_of_match,
1850 .probe = vchiq_probe,
1851 .remove_new = vchiq_remove,
/*
 * vchiq_driver_init() - module init: register the vchiq bus type first
 * (child devices created in probe attach to it), then the platform
 * driver; on driver-registration failure the bus is unregistered again.
 * (ret declaration and return statements are not visible in this
 * extract.)
 */
1854 static int __init vchiq_driver_init(void)
1858 ret = bus_register(&vchiq_bus_type);
1860 pr_err("Failed to register %s\n", vchiq_bus_type.name);
1864 ret = platform_driver_register(&vchiq_driver);
1866 pr_err("Failed to register vchiq driver\n");
/* Roll back the bus registration on failure. */
1867 bus_unregister(&vchiq_bus_type);
1872 module_init(vchiq_driver_init);
1874 static void __exit vchiq_driver_exit(void)
1876 bus_unregister(&vchiq_bus_type);
1877 platform_driver_unregister(&vchiq_driver);
1879 module_exit(vchiq_driver_exit);
/* Dual license string matches the SPDX header (GPL-2.0 OR BSD-3-Clause). */
1881 MODULE_LICENSE("Dual BSD/GPL");
1882 MODULE_DESCRIPTION("Videocore VCHIQ driver");
1883 MODULE_AUTHOR("Broadcom Corporation");