1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
2 /*
3  * Copyright (c) 2014 Raspberry Pi (Trading) Ltd. All rights reserved.
4  * Copyright (c) 2010-2012 Broadcom. All rights reserved.
5  */
6
7 #include <linux/kernel.h>
8 #include <linux/module.h>
9 #include <linux/sched/signal.h>
10 #include <linux/types.h>
11 #include <linux/errno.h>
12 #include <linux/cdev.h>
13 #include <linux/fs.h>
14 #include <linux/device.h>
15 #include <linux/device/bus.h>
16 #include <linux/mm.h>
17 #include <linux/highmem.h>
18 #include <linux/pagemap.h>
19 #include <linux/bug.h>
20 #include <linux/completion.h>
21 #include <linux/list.h>
22 #include <linux/of.h>
23 #include <linux/platform_device.h>
24 #include <linux/compat.h>
25 #include <linux/dma-mapping.h>
26 #include <linux/rcupdate.h>
27 #include <linux/delay.h>
28 #include <linux/slab.h>
29 #include <linux/interrupt.h>
30 #include <linux/io.h>
31 #include <linux/uaccess.h>
32 #include <soc/bcm2835/raspberrypi-firmware.h>
33
34 #include "vchiq_core.h"
35 #include "vchiq_ioctl.h"
36 #include "vchiq_arm.h"
37 #include "vchiq_bus.h"
38 #include "vchiq_debugfs.h"
39 #include "vchiq_connected.h"
40 #include "vchiq_pagelist.h"
41
42 #define DEVICE_NAME "vchiq"
43
44 #define TOTAL_SLOTS (VCHIQ_SLOT_ZERO_SLOTS + 2 * 32)
45
46 #define MAX_FRAGMENTS (VCHIQ_NUM_CURRENT_BULKS * 2)
47
48 #define VCHIQ_PLATFORM_FRAGMENTS_OFFSET_IDX 0
49 #define VCHIQ_PLATFORM_FRAGMENTS_COUNT_IDX  1
50
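/* Doorbell registers: the VPU rings BELL0 to interrupt the ARM (the
 * IRQ handler's read returns and clears the status, with ARM_DS_ACTIVE
 * set if the bell was rung), and writing BELL2 rings the VPU.
 */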
51 #define BELL0   0x00
52 #define BELL2   0x08
53
54 #define ARM_DS_ACTIVE   BIT(2)
55
56 /* Override the default prefix, which would be vchiq_arm (from the filename) */
57 #undef MODULE_PARAM_PREFIX
58 #define MODULE_PARAM_PREFIX DEVICE_NAME "."
59
60 #define KEEPALIVE_VER 1
61 #define KEEPALIVE_VER_MIN KEEPALIVE_VER
62
63 /* Run time control of log level, based on KERN_XXX level. */
64 int vchiq_arm_log_level = VCHIQ_LOG_DEFAULT;
65 int vchiq_susp_log_level = VCHIQ_LOG_ERROR;
66
67 DEFINE_SPINLOCK(msg_queue_spinlock);
68 struct vchiq_state g_state;
69
70 /*
71  * The devices implemented in the VCHIQ firmware are not discoverable,
72  * so we need to maintain a list of them in order to register them with
73  * the interface.
74  */
75 static struct vchiq_device *bcm2835_audio;
76 static struct vchiq_device *bcm2835_camera;
77
78 struct vchiq_drvdata {
79         const unsigned int cache_line_size;
80         struct rpi_firmware *fw;
81 };
82
83 static struct vchiq_drvdata bcm2835_drvdata = {
84         .cache_line_size = 32,
85 };
86
87 static struct vchiq_drvdata bcm2836_drvdata = {
88         .cache_line_size = 64,
89 };
90
91 struct vchiq_arm_state {
92         /* Keepalive-related data */
93         struct task_struct *ka_thread;
94         struct completion ka_evt;
95         atomic_t ka_use_count;
96         atomic_t ka_use_ack_count;
97         atomic_t ka_release_count;
98
99         rwlock_t susp_res_lock;
100
101         struct vchiq_state *state;
102
103         /*
104          * Global use count for videocore.
105          * This is equal to the sum of the use counts for all services.  When
106          * this hits zero the videocore suspend procedure will be initiated.
107          */
108         int videocore_use_count;
109
110         /*
111          * Use count to track requests from videocore peer.
112          * This use count is not associated with a service, so it needs
113          * to be tracked separately in the arm state.
114          */
115         int peer_use_count;
116
117         /*
118          * Flag to indicate that the first vchiq connect has made it through.
119          * This means that both sides should be fully ready, and we should
120          * be able to suspend after this point.
121          */
122         int first_connect;
123 };
124
125 struct vchiq_2835_state {
126         int inited;
127         struct vchiq_arm_state arm_state;
128 };
129
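/*
 * Book-keeping for one bulk transfer's pagelist: the coherent buffer
 * holding the pagelist itself, its DMA mapping, and the pinned pages
 * and scatterlist that must be released once the transfer completes.
 */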
130 struct vchiq_pagelist_info {
131         struct pagelist *pagelist;
132         size_t pagelist_buffer_size;
133         dma_addr_t dma_addr;
134         enum dma_data_direction dma_dir;
135         unsigned int num_pages;
136         unsigned int pages_need_release;
137         struct page **pages;
138         struct scatterlist *scatterlist;
139         unsigned int scatterlist_mapped;
140 };
141
142 static void __iomem *g_regs;
143 /* This value is the size of the L2 cache lines as understood by the
144  * VPU firmware, which determines the required alignment of the
145  * offsets/sizes in pagelists.
146  *
147  * Modern VPU firmware looks for a DT "cache-line-size" property in
148  * the VCHIQ node and will overwrite it with the actual L2 cache size,
149  * which the kernel must then respect.  That property was rejected
150  * upstream, so we have to use the VPU firmware's compatibility value
151  * of 32.
152  */
153 static unsigned int g_cache_line_size = 32;
154 static unsigned int g_fragments_size;
155 static char *g_fragments_base;
156 static char *g_free_fragments;
157 static struct semaphore g_free_fragments_sema;
158
159 static DEFINE_SEMAPHORE(g_free_fragments_mutex, 1);
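
/* g_free_fragments_sema counts the free fragment buffers, letting
 * create_pagelist() sleep until one becomes available, while
 * g_free_fragments_mutex (a binary semaphore) protects the free-list
 * head pointer itself.
 */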
160
161 static int
162 vchiq_blocking_bulk_transfer(struct vchiq_instance *instance, unsigned int handle, void *data,
163                              unsigned int size, enum vchiq_bulk_dir dir);
164
165 static irqreturn_t
166 vchiq_doorbell_irq(int irq, void *dev_id)
167 {
168         struct vchiq_state *state = dev_id;
169         irqreturn_t ret = IRQ_NONE;
170         unsigned int status;
171
172         /* Read (and clear) the doorbell */
173         status = readl(g_regs + BELL0);
174
175         if (status & ARM_DS_ACTIVE) {  /* Was the doorbell rung? */
176                 remote_event_pollall(state);
177                 ret = IRQ_HANDLED;
178         }
179
180         return ret;
181 }
182
183 static void
184 cleanup_pagelistinfo(struct vchiq_instance *instance, struct vchiq_pagelist_info *pagelistinfo)
185 {
186         if (pagelistinfo->scatterlist_mapped) {
187                 dma_unmap_sg(instance->state->dev, pagelistinfo->scatterlist,
188                              pagelistinfo->num_pages, pagelistinfo->dma_dir);
189         }
190
191         if (pagelistinfo->pages_need_release)
192                 unpin_user_pages(pagelistinfo->pages, pagelistinfo->num_pages);
193
194         dma_free_coherent(instance->state->dev, pagelistinfo->pagelist_buffer_size,
195                           pagelistinfo->pagelist, pagelistinfo->dma_addr);
196 }
197
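/*
 * Each entry in pagelist->addrs packs a run of contiguous pages into
 * one word: the page-aligned base address in the upper bits and
 * (page count - 1) in the low bits.  A block is adjacent when it
 * starts exactly where the previous run ends, e.g. (illustrative,
 * assuming 4 KiB pages) addrs[k - 1] == 0x12340003 covers pages
 * 0x12340000..0x12343fff, so addr 0x12344000 is adjacent.
 */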
198 static inline bool
199 is_adjacent_block(u32 *addrs, u32 addr, unsigned int k)
200 {
201         u32 tmp;
202
203         if (!k)
204                 return false;
205
206         tmp = (addrs[k - 1] & PAGE_MASK) +
207               (((addrs[k - 1] & ~PAGE_MASK) + 1) << PAGE_SHIFT);
208
209         return tmp == (addr & PAGE_MASK);
210 }
211
212 /* There is a potential problem with partial cache lines (pages?)
213  * at the ends of the block when reading. If the CPU accessed anything in
214  * the same line (page?) then it may have pulled old data into the cache,
215  * obscuring the new data underneath. We can solve this by transferring the
216  * partial cache lines separately, and allowing the ARM to copy into the
217  * cached area.
218  */
219
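/*
 * Worked example (illustrative, assuming a 32-byte cache line): a READ
 * of 100 bytes at offset 10 has a 22-byte head fragment (bytes 10..31)
 * and a 14-byte tail fragment (the final 14 bytes); both travel via a
 * separate fragment buffer and are copied back by free_pagelist().
 */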
220 static struct vchiq_pagelist_info *
221 create_pagelist(struct vchiq_instance *instance, char *buf, char __user *ubuf,
222                 size_t count, unsigned short type)
223 {
224         struct pagelist *pagelist;
225         struct vchiq_pagelist_info *pagelistinfo;
226         struct page **pages;
227         u32 *addrs;
228         unsigned int num_pages, offset, i, k;
229         int actual_pages;
230         size_t pagelist_size;
231         struct scatterlist *scatterlist, *sg;
232         int dma_buffers;
233         dma_addr_t dma_addr;
234
235         if (count >= INT_MAX - PAGE_SIZE)
236                 return NULL;
237
238         if (buf)
239                 offset = (uintptr_t)buf & (PAGE_SIZE - 1);
240         else
241                 offset = (uintptr_t)ubuf & (PAGE_SIZE - 1);
242         num_pages = DIV_ROUND_UP(count + offset, PAGE_SIZE);
243
244         if ((size_t)num_pages > (SIZE_MAX - sizeof(struct pagelist) -
245                          sizeof(struct vchiq_pagelist_info)) /
246                         (sizeof(u32) + sizeof(pages[0]) +
247                          sizeof(struct scatterlist)))
248                 return NULL;
249
250         pagelist_size = sizeof(struct pagelist) +
251                         (num_pages * sizeof(u32)) +
252                         (num_pages * sizeof(pages[0]) +
253                         (num_pages * sizeof(struct scatterlist))) +
254                         sizeof(struct vchiq_pagelist_info);
255
256         /* Allocate one coherent buffer holding, in order: the pagelist
257          * and addrs[], the page pointers, scatterlist and pagelistinfo
258          */
259         pagelist = dma_alloc_coherent(instance->state->dev, pagelist_size, &dma_addr,
260                                       GFP_KERNEL);
261
262         vchiq_log_trace(vchiq_arm_log_level, "%s - %pK", __func__, pagelist);
263
264         if (!pagelist)
265                 return NULL;
266
267         addrs           = pagelist->addrs;
268         pages           = (struct page **)(addrs + num_pages);
269         scatterlist     = (struct scatterlist *)(pages + num_pages);
270         pagelistinfo    = (struct vchiq_pagelist_info *)
271                           (scatterlist + num_pages);
272
273         pagelist->length = count;
274         pagelist->type = type;
275         pagelist->offset = offset;
276
277         /* Populate the fields of the pagelistinfo structure */
278         pagelistinfo->pagelist = pagelist;
279         pagelistinfo->pagelist_buffer_size = pagelist_size;
280         pagelistinfo->dma_addr = dma_addr;
281         pagelistinfo->dma_dir =  (type == PAGELIST_WRITE) ?
282                                   DMA_TO_DEVICE : DMA_FROM_DEVICE;
283         pagelistinfo->num_pages = num_pages;
284         pagelistinfo->pages_need_release = 0;
285         pagelistinfo->pages = pages;
286         pagelistinfo->scatterlist = scatterlist;
287         pagelistinfo->scatterlist_mapped = 0;
288
289         if (buf) {
290                 unsigned long length = count;
291                 unsigned int off = offset;
292
293                 for (actual_pages = 0; actual_pages < num_pages;
294                      actual_pages++) {
295                         struct page *pg =
296                                 vmalloc_to_page((buf +
297                                                  (actual_pages * PAGE_SIZE)));
298                         size_t bytes = PAGE_SIZE - off;
299
300                         if (!pg) {
301                                 cleanup_pagelistinfo(instance, pagelistinfo);
302                                 return NULL;
303                         }
304
305                         if (bytes > length)
306                                 bytes = length;
307                         pages[actual_pages] = pg;
308                         length -= bytes;
309                         off = 0;
310                 }
311                 /* do not try and release vmalloc pages */
312         } else {
313                 actual_pages = pin_user_pages_fast((unsigned long)ubuf & PAGE_MASK, num_pages,
314                                                    type == PAGELIST_READ, pages);
315
316                 if (actual_pages != num_pages) {
317                         vchiq_log_info(vchiq_arm_log_level,
318                                        "%s - only %d/%d pages pinned",
319                                        __func__, actual_pages, num_pages);
320
321                         /* This is probably due to the process being killed */
322                         if (actual_pages > 0)
323                                 unpin_user_pages(pages, actual_pages);
324                         cleanup_pagelistinfo(instance, pagelistinfo);
325                         return NULL;
326                 }
327                 /* pinned user pages will need to be released later */
328                 pagelistinfo->pages_need_release = 1;
329         }
330
331         /*
332          * Initialize the scatterlist so that the magic cookie
333          * is filled if debugging is enabled
334          */
335         sg_init_table(scatterlist, num_pages);
336         /* Now set the pages for each scatterlist */
337         for (i = 0; i < num_pages; i++) {
338                 unsigned int len = PAGE_SIZE - offset;
339
340                 if (len > count)
341                         len = count;
342                 sg_set_page(scatterlist + i, pages[i], len, offset);
343                 offset = 0;
344                 count -= len;
345         }
346
347         dma_buffers = dma_map_sg(instance->state->dev,
348                                  scatterlist,
349                                  num_pages,
350                                  pagelistinfo->dma_dir);
351
352         if (dma_buffers == 0) {
353                 cleanup_pagelistinfo(instance, pagelistinfo);
354                 return NULL;
355         }
356
357         pagelistinfo->scatterlist_mapped = 1;
358
359         /* Combine adjacent blocks for performance */
360         k = 0;
361         for_each_sg(scatterlist, sg, dma_buffers, i) {
362                 u32 len = sg_dma_len(sg);
363                 u32 addr = sg_dma_address(sg);
364
365                 /* Note: each addrs entry is the address | (page count - 1).
366                  * The firmware expects blocks after the first to be page-
367                  * aligned and a multiple of the page size
368                  */
369                 WARN_ON(len == 0);
370                 WARN_ON(i && (i != (dma_buffers - 1)) && (len & ~PAGE_MASK));
371                 WARN_ON(i && (addr & ~PAGE_MASK));
372                 if (is_adjacent_block(addrs, addr, k))
373                         addrs[k - 1] += ((len + PAGE_SIZE - 1) >> PAGE_SHIFT);
374                 else
375                         addrs[k++] = (addr & PAGE_MASK) |
376                                 (((len + PAGE_SIZE - 1) >> PAGE_SHIFT) - 1);
377         }
378
379         /* Partial cache lines (fragments) require special measures */
380         if ((type == PAGELIST_READ) &&
381             ((pagelist->offset & (g_cache_line_size - 1)) ||
382             ((pagelist->offset + pagelist->length) &
383             (g_cache_line_size - 1)))) {
384                 char *fragments;
385
386                 if (down_interruptible(&g_free_fragments_sema)) {
387                         cleanup_pagelistinfo(instance, pagelistinfo);
388                         return NULL;
389                 }
390
391                 WARN_ON(!g_free_fragments);
392
393                 down(&g_free_fragments_mutex);
394                 fragments = g_free_fragments;
395                 WARN_ON(!fragments);
396                 g_free_fragments = *(char **)g_free_fragments;
397                 up(&g_free_fragments_mutex);
398                 pagelist->type = PAGELIST_READ_WITH_FRAGMENTS +
399                         (fragments - g_fragments_base) / g_fragments_size;
400         }
401
402         return pagelistinfo;
403 }
404
405 static void
406 free_pagelist(struct vchiq_instance *instance, struct vchiq_pagelist_info *pagelistinfo,
407               int actual)
408 {
409         struct pagelist *pagelist = pagelistinfo->pagelist;
410         struct page **pages = pagelistinfo->pages;
411         unsigned int num_pages = pagelistinfo->num_pages;
412
413         vchiq_log_trace(vchiq_arm_log_level, "%s - %pK, %d",
414                         __func__, pagelistinfo->pagelist, actual);
415
416         /*
417          * NOTE: dma_unmap_sg must be called before the
418          * cpu can touch any of the data/pages.
419          */
420         dma_unmap_sg(instance->state->dev, pagelistinfo->scatterlist,
421                      pagelistinfo->num_pages, pagelistinfo->dma_dir);
422         pagelistinfo->scatterlist_mapped = 0;
423
424         /* Deal with any partial cache lines (fragments) */
425         if (pagelist->type >= PAGELIST_READ_WITH_FRAGMENTS && g_fragments_base) {
426                 char *fragments = g_fragments_base +
427                         (pagelist->type - PAGELIST_READ_WITH_FRAGMENTS) *
428                         g_fragments_size;
429                 int head_bytes, tail_bytes;
430
431                 head_bytes = (g_cache_line_size - pagelist->offset) &
432                         (g_cache_line_size - 1);
433                 tail_bytes = (pagelist->offset + actual) &
434                         (g_cache_line_size - 1);
435
436                 if ((actual >= 0) && (head_bytes != 0)) {
437                         if (head_bytes > actual)
438                                 head_bytes = actual;
439
440                         memcpy_to_page(pages[0],
441                                 pagelist->offset,
442                                 fragments,
443                                 head_bytes);
444                 }
445                 if ((actual >= 0) && (head_bytes < actual) &&
446                     (tail_bytes != 0))
447                         memcpy_to_page(pages[num_pages - 1],
448                                 (pagelist->offset + actual) &
449                                 (PAGE_SIZE - 1) & ~(g_cache_line_size - 1),
450                                 fragments + g_cache_line_size,
451                                 tail_bytes);
452
453                 down(&g_free_fragments_mutex);
454                 *(char **)fragments = g_free_fragments;
455                 g_free_fragments = fragments;
456                 up(&g_free_fragments_mutex);
457                 up(&g_free_fragments_sema);
458         }
459
460         /* Need to mark all the pages dirty. */
461         if (pagelist->type != PAGELIST_WRITE &&
462             pagelistinfo->pages_need_release) {
463                 unsigned int i;
464
465                 for (i = 0; i < num_pages; i++)
466                         set_page_dirty(pages[i]);
467         }
468
469         cleanup_pagelistinfo(instance, pagelistinfo);
470 }
471
472 static int vchiq_platform_init(struct platform_device *pdev, struct vchiq_state *state)
473 {
474         struct device *dev = &pdev->dev;
475         struct vchiq_drvdata *drvdata = platform_get_drvdata(pdev);
476         struct rpi_firmware *fw = drvdata->fw;
477         struct vchiq_slot_zero *vchiq_slot_zero;
478         void *slot_mem;
479         dma_addr_t slot_phys;
480         u32 channelbase;
481         int slot_mem_size, frag_mem_size;
482         int err, irq, i;
483
484         /*
485          * VCHI messages between the CPU and firmware use
486          * 32-bit bus addresses.
487          */
488         err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
489
490         if (err < 0)
491                 return err;
492
493         g_cache_line_size = drvdata->cache_line_size;
494         g_fragments_size = 2 * g_cache_line_size;
495
496         /* Allocate space for the channels in coherent memory */
497         slot_mem_size = PAGE_ALIGN(TOTAL_SLOTS * VCHIQ_SLOT_SIZE);
498         frag_mem_size = PAGE_ALIGN(g_fragments_size * MAX_FRAGMENTS);
499
500         slot_mem = dmam_alloc_coherent(dev, slot_mem_size + frag_mem_size,
501                                        &slot_phys, GFP_KERNEL);
502         if (!slot_mem) {
503                 dev_err(dev, "could not allocate DMA memory\n");
504                 return -ENOMEM;
505         }
506
507         WARN_ON(((unsigned long)slot_mem & (PAGE_SIZE - 1)) != 0);
508
509         vchiq_slot_zero = vchiq_init_slots(slot_mem, slot_mem_size);
510         if (!vchiq_slot_zero)
511                 return -ENOMEM;
512
513         vchiq_slot_zero->platform_data[VCHIQ_PLATFORM_FRAGMENTS_OFFSET_IDX] =
514                 (int)slot_phys + slot_mem_size;
515         vchiq_slot_zero->platform_data[VCHIQ_PLATFORM_FRAGMENTS_COUNT_IDX] =
516                 MAX_FRAGMENTS;
517
518         g_fragments_base = (char *)slot_mem + slot_mem_size;
519
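        /*
         * Chain the fragment buffers into a singly linked free list by
         * storing the address of the next free fragment in the first
         * bytes of each one; create_pagelist() pops entries from here.
         */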
520         g_free_fragments = g_fragments_base;
521         for (i = 0; i < (MAX_FRAGMENTS - 1); i++) {
522                 *(char **)&g_fragments_base[i * g_fragments_size] =
523                         &g_fragments_base[(i + 1) * g_fragments_size];
524         }
525         *(char **)&g_fragments_base[i * g_fragments_size] = NULL;
526         sema_init(&g_free_fragments_sema, MAX_FRAGMENTS);
527
528         err = vchiq_init_state(state, vchiq_slot_zero, dev);
529         if (err)
530                 return err;
531
532         g_regs = devm_platform_ioremap_resource(pdev, 0);
533         if (IS_ERR(g_regs))
534                 return PTR_ERR(g_regs);
535
536         irq = platform_get_irq(pdev, 0);
537         if (irq <= 0)
538                 return irq;
539
540         err = devm_request_irq(dev, irq, vchiq_doorbell_irq, IRQF_IRQPOLL,
541                                "VCHIQ doorbell", state);
542         if (err) {
543                 dev_err(dev, "failed to register irq=%d\n", irq);
544                 return err;
545         }
546
547         /* Send the base address of the slots to VideoCore */
548         channelbase = slot_phys;
549         err = rpi_firmware_property(fw, RPI_FIRMWARE_VCHIQ_INIT,
550                                     &channelbase, sizeof(channelbase));
551         if (err) {
552                 dev_err(dev, "failed to send firmware property: %d\n", err);
553                 return err;
554         }
555
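        /*
         * On success the firmware overwrites the property buffer with
         * zero; a non-zero response means VideoCore rejected the slot
         * base address.
         */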
556         if (channelbase) {
557                 dev_err(dev, "failed to set channelbase (response: %x)\n",
558                         channelbase);
559                 return -ENXIO;
560         }
561
562         vchiq_log_info(vchiq_arm_log_level, "vchiq_init - done (slots %pK, phys %pad)",
563                        vchiq_slot_zero, &slot_phys);
564
565         vchiq_call_connected_callbacks();
566
567         return 0;
568 }
569
570 static void
571 vchiq_arm_init_state(struct vchiq_state *state,
572                      struct vchiq_arm_state *arm_state)
573 {
574         if (arm_state) {
575                 rwlock_init(&arm_state->susp_res_lock);
576
577                 init_completion(&arm_state->ka_evt);
578                 atomic_set(&arm_state->ka_use_count, 0);
579                 atomic_set(&arm_state->ka_use_ack_count, 0);
580                 atomic_set(&arm_state->ka_release_count, 0);
581
582                 arm_state->state = state;
583                 arm_state->first_connect = 0;
584         }
585 }
586
587 int
588 vchiq_platform_init_state(struct vchiq_state *state)
589 {
590         struct vchiq_2835_state *platform_state;
591
592         state->platform_state = kzalloc(sizeof(*platform_state), GFP_KERNEL);
593         if (!state->platform_state)
594                 return -ENOMEM;
595
596         platform_state = (struct vchiq_2835_state *)state->platform_state;
597
598         platform_state->inited = 1;
599         vchiq_arm_init_state(state, &platform_state->arm_state);
600
601         return 0;
602 }
603
604 static struct vchiq_arm_state *vchiq_platform_get_arm_state(struct vchiq_state *state)
605 {
606         struct vchiq_2835_state *platform_state;
607
608         platform_state   = (struct vchiq_2835_state *)state->platform_state;
609
610         WARN_ON_ONCE(!platform_state->inited);
611
612         return &platform_state->arm_state;
613 }
614
615 void
616 remote_event_signal(struct remote_event *event)
617 {
618         /*
619          * Ensure that all writes to shared data structures have completed
620          * before signalling the peer.
621          */
622         wmb();
623
624         event->fired = 1;
625
626         dsb(sy);         /* data synchronization barrier */
627
628         if (event->armed)
629                 writel(0, g_regs + BELL2); /* trigger vc interrupt */
630 }
631
632 int
633 vchiq_prepare_bulk_data(struct vchiq_instance *instance, struct vchiq_bulk *bulk, void *offset,
634                         void __user *uoffset, int size, int dir)
635 {
636         struct vchiq_pagelist_info *pagelistinfo;
637
638         pagelistinfo = create_pagelist(instance, offset, uoffset, size,
639                                        (dir == VCHIQ_BULK_RECEIVE)
640                                        ? PAGELIST_READ
641                                        : PAGELIST_WRITE);
642
643         if (!pagelistinfo)
644                 return -ENOMEM;
645
646         bulk->data = pagelistinfo->dma_addr;
647
648         /*
649          * Store the pagelistinfo address in remote_data,
650          * which isn't used by the slave.
651          */
652         bulk->remote_data = pagelistinfo;
653
654         return 0;
655 }
656
657 void
658 vchiq_complete_bulk(struct vchiq_instance *instance, struct vchiq_bulk *bulk)
659 {
660         if (bulk && bulk->remote_data && bulk->actual)
661                 free_pagelist(instance, (struct vchiq_pagelist_info *)bulk->remote_data,
662                               bulk->actual);
663 }
664
665 int vchiq_dump_platform_state(void *dump_context)
666 {
667         char buf[80];
668         int len;
669
670         len = snprintf(buf, sizeof(buf), "  Platform: 2835 (VC master)");
671         return vchiq_dump(dump_context, buf, len + 1);
672 }
673
674 #define VCHIQ_INIT_RETRIES 10
675 int vchiq_initialise(struct vchiq_instance **instance_out)
676 {
677         struct vchiq_state *state;
678         struct vchiq_instance *instance = NULL;
679         int i, ret;
680
681         /*
682          * VideoCore may not be ready due to boot up timing.
683          * It may never be ready if kernel and firmware are mismatched, so don't
684          * block forever.
685          */
686         for (i = 0; i < VCHIQ_INIT_RETRIES; i++) {
687                 state = vchiq_get_state();
688                 if (state)
689                         break;
690                 usleep_range(500, 600);
691         }
692         if (i == VCHIQ_INIT_RETRIES) {
693                 vchiq_log_error(vchiq_core_log_level, "%s: videocore not initialized\n", __func__);
694                 ret = -ENOTCONN;
695                 goto failed;
696         } else if (i > 0) {
697                 vchiq_log_warning(vchiq_core_log_level,
698                                   "%s: videocore initialized after %d retries\n", __func__, i);
699         }
700
701         instance = kzalloc(sizeof(*instance), GFP_KERNEL);
702         if (!instance) {
703                 vchiq_log_error(vchiq_core_log_level,
704                                 "%s: error allocating vchiq instance\n", __func__);
705                 ret = -ENOMEM;
706                 goto failed;
707         }
708
709         instance->connected = 0;
710         instance->state = state;
711         mutex_init(&instance->bulk_waiter_list_mutex);
712         INIT_LIST_HEAD(&instance->bulk_waiter_list);
713
714         *instance_out = instance;
715
716         ret = 0;
717
718 failed:
719         vchiq_log_trace(vchiq_core_log_level, "%s(%p): returning %d", __func__, instance, ret);
720
721         return ret;
722 }
723 EXPORT_SYMBOL(vchiq_initialise);
724
725 void free_bulk_waiter(struct vchiq_instance *instance)
726 {
727         struct bulk_waiter_node *waiter, *next;
728
729         list_for_each_entry_safe(waiter, next,
730                                  &instance->bulk_waiter_list, list) {
731                 list_del(&waiter->list);
732                 vchiq_log_info(vchiq_arm_log_level, "bulk_waiter - cleaned up %pK for pid %d",
733                                waiter, waiter->pid);
734                 kfree(waiter);
735         }
736 }
737
738 int vchiq_shutdown(struct vchiq_instance *instance)
739 {
740         int status = 0;
741         struct vchiq_state *state = instance->state;
742
743         if (mutex_lock_killable(&state->mutex))
744                 return -EAGAIN;
745
746         /* Remove all services */
747         vchiq_shutdown_internal(state, instance);
748
749         mutex_unlock(&state->mutex);
750
751         vchiq_log_trace(vchiq_core_log_level, "%s(%p): returning %d", __func__, instance, status);
752
753         free_bulk_waiter(instance);
754         kfree(instance);
755
756         return status;
757 }
758 EXPORT_SYMBOL(vchiq_shutdown);
759
760 static int vchiq_is_connected(struct vchiq_instance *instance)
761 {
762         return instance->connected;
763 }
764
765 int vchiq_connect(struct vchiq_instance *instance)
766 {
767         int status;
768         struct vchiq_state *state = instance->state;
769
770         if (mutex_lock_killable(&state->mutex)) {
771                 vchiq_log_trace(vchiq_core_log_level, "%s: call to mutex_lock failed", __func__);
772                 status = -EAGAIN;
773                 goto failed;
774         }
775         status = vchiq_connect_internal(state, instance);
776
777         if (!status)
778                 instance->connected = 1;
779
780         mutex_unlock(&state->mutex);
781
782 failed:
783         vchiq_log_trace(vchiq_core_log_level, "%s(%p): returning %d", __func__, instance, status);
784
785         return status;
786 }
787 EXPORT_SYMBOL(vchiq_connect);
788
789 static int
790 vchiq_add_service(struct vchiq_instance *instance,
791                   const struct vchiq_service_params_kernel *params,
792                   unsigned int *phandle)
793 {
794         int status;
795         struct vchiq_state *state = instance->state;
796         struct vchiq_service *service = NULL;
797         int srvstate;
798
799         *phandle = VCHIQ_SERVICE_HANDLE_INVALID;
800
801         srvstate = vchiq_is_connected(instance)
802                 ? VCHIQ_SRVSTATE_LISTENING
803                 : VCHIQ_SRVSTATE_HIDDEN;
804
805         service = vchiq_add_service_internal(state, params, srvstate, instance, NULL);
806
807         if (service) {
808                 *phandle = service->handle;
809                 status = 0;
810         } else {
811                 status = -EINVAL;
812         }
813
814         vchiq_log_trace(vchiq_core_log_level, "%s(%p): returning %d", __func__, instance, status);
815
816         return status;
817 }
818
819 int
820 vchiq_open_service(struct vchiq_instance *instance,
821                    const struct vchiq_service_params_kernel *params,
822                    unsigned int *phandle)
823 {
824         int status = -EINVAL;
825         struct vchiq_state   *state = instance->state;
826         struct vchiq_service *service = NULL;
827
828         *phandle = VCHIQ_SERVICE_HANDLE_INVALID;
829
830         if (!vchiq_is_connected(instance))
831                 goto failed;
832
833         service = vchiq_add_service_internal(state, params, VCHIQ_SRVSTATE_OPENING, instance, NULL);
834
835         if (service) {
836                 *phandle = service->handle;
837                 status = vchiq_open_service_internal(service, current->pid);
838                 if (status) {
839                         vchiq_remove_service(instance, service->handle);
840                         *phandle = VCHIQ_SERVICE_HANDLE_INVALID;
841                 }
842         }
843
844 failed:
845         vchiq_log_trace(vchiq_core_log_level, "%s(%p): returning %d", __func__, instance, status);
846
847         return status;
848 }
849 EXPORT_SYMBOL(vchiq_open_service);
850
851 int
852 vchiq_bulk_transmit(struct vchiq_instance *instance, unsigned int handle, const void *data,
853                     unsigned int size, void *userdata, enum vchiq_bulk_mode mode)
854 {
855         int status;
856
857         while (1) {
858                 switch (mode) {
859                 case VCHIQ_BULK_MODE_NOCALLBACK:
860                 case VCHIQ_BULK_MODE_CALLBACK:
861                         status = vchiq_bulk_transfer(instance, handle,
862                                                      (void *)data, NULL,
863                                                      size, userdata, mode,
864                                                      VCHIQ_BULK_TRANSMIT);
865                         break;
866                 case VCHIQ_BULK_MODE_BLOCKING:
867                         status = vchiq_blocking_bulk_transfer(instance, handle, (void *)data, size,
868                                                               VCHIQ_BULK_TRANSMIT);
869                         break;
870                 default:
871                         return -EINVAL;
872                 }
873
874                 /*
875                  * vchiq_*_bulk_transfer() may return -EAGAIN, so we need
876                  * to implement a retry mechanism since this function is
877                  * supposed to block until queued
878                  */
879                 if (status != -EAGAIN)
880                         break;
881
882                 msleep(1);
883         }
884
885         return status;
886 }
887 EXPORT_SYMBOL(vchiq_bulk_transmit);
888
889 int vchiq_bulk_receive(struct vchiq_instance *instance, unsigned int handle,
890                        void *data, unsigned int size, void *userdata,
891                        enum vchiq_bulk_mode mode)
892 {
893         int status;
894
895         while (1) {
896                 switch (mode) {
897                 case VCHIQ_BULK_MODE_NOCALLBACK:
898                 case VCHIQ_BULK_MODE_CALLBACK:
899                         status = vchiq_bulk_transfer(instance, handle, data, NULL,
900                                                      size, userdata,
901                                                      mode, VCHIQ_BULK_RECEIVE);
902                         break;
903                 case VCHIQ_BULK_MODE_BLOCKING:
904                         status = vchiq_blocking_bulk_transfer(instance, handle, (void *)data, size,
905                                                               VCHIQ_BULK_RECEIVE);
906                         break;
907                 default:
908                         return -EINVAL;
909                 }
910
911                 /*
912                  * vchiq_*_bulk_transfer() may return -EAGAIN, so we need
913                  * to implement a retry mechanism since this function is
914                  * supposed to block until queued
915                  */
916                 if (status != -EAGAIN)
917                         break;
918
919                 msleep(1);
920         }
921
922         return status;
923 }
924 EXPORT_SYMBOL(vchiq_bulk_receive);
925
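/*
 * A bulk_waiter_node is kept per calling pid so that a blocking
 * transfer interrupted by a signal can be resumed: if the same thread
 * retries with the same data and size, the outstanding bulk transfer
 * is picked up again rather than being queued a second time.
 */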
926 static int
927 vchiq_blocking_bulk_transfer(struct vchiq_instance *instance, unsigned int handle, void *data,
928                              unsigned int size, enum vchiq_bulk_dir dir)
929 {
930         struct vchiq_service *service;
931         int status;
932         struct bulk_waiter_node *waiter = NULL, *iter;
933
934         service = find_service_by_handle(instance, handle);
935         if (!service)
936                 return -EINVAL;
937
938         vchiq_service_put(service);
939
940         mutex_lock(&instance->bulk_waiter_list_mutex);
941         list_for_each_entry(iter, &instance->bulk_waiter_list, list) {
942                 if (iter->pid == current->pid) {
943                         list_del(&iter->list);
944                         waiter = iter;
945                         break;
946                 }
947         }
948         mutex_unlock(&instance->bulk_waiter_list_mutex);
949
950         if (waiter) {
951                 struct vchiq_bulk *bulk = waiter->bulk_waiter.bulk;
952
953                 if (bulk) {
954                         /* This thread has an outstanding bulk transfer. */
955                         /* FIXME: why compare a dma address to a pointer? */
956                         if ((bulk->data != (dma_addr_t)(uintptr_t)data) || (bulk->size != size)) {
957                                 /*
958                                  * This is not a retry of the previous one.
959                                  * Cancel the signal when the transfer completes.
960                                  */
961                                 spin_lock(&bulk_waiter_spinlock);
962                                 bulk->userdata = NULL;
963                                 spin_unlock(&bulk_waiter_spinlock);
964                         }
965                 }
966         } else {
967                 waiter = kzalloc(sizeof(*waiter), GFP_KERNEL);
968                 if (!waiter) {
969                         vchiq_log_error(vchiq_core_log_level, "%s - out of memory", __func__);
970                         return -ENOMEM;
971                 }
972         }
973
974         status = vchiq_bulk_transfer(instance, handle, data, NULL, size,
975                                      &waiter->bulk_waiter,
976                                      VCHIQ_BULK_MODE_BLOCKING, dir);
977         if ((status != -EAGAIN) || fatal_signal_pending(current) || !waiter->bulk_waiter.bulk) {
978                 struct vchiq_bulk *bulk = waiter->bulk_waiter.bulk;
979
980                 if (bulk) {
981                         /* Cancel the signal when the transfer completes. */
982                         spin_lock(&bulk_waiter_spinlock);
983                         bulk->userdata = NULL;
984                         spin_unlock(&bulk_waiter_spinlock);
985                 }
986                 kfree(waiter);
987         } else {
988                 waiter->pid = current->pid;
989                 mutex_lock(&instance->bulk_waiter_list_mutex);
990                 list_add(&waiter->list, &instance->bulk_waiter_list);
991                 mutex_unlock(&instance->bulk_waiter_list_mutex);
992                 vchiq_log_info(vchiq_arm_log_level, "saved bulk_waiter %pK for pid %d", waiter,
993                                current->pid);
994         }
995
996         return status;
997 }
998
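/*
 * Completion records live in a power-of-two ring: completion_insert
 * and completion_remove are free-running counters, so their difference
 * is the fill level and "insert & (MAX_COMPLETIONS - 1)" selects the
 * slot to use.
 */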
999 static int
1000 add_completion(struct vchiq_instance *instance, enum vchiq_reason reason,
1001                struct vchiq_header *header, struct user_service *user_service,
1002                void *bulk_userdata)
1003 {
1004         struct vchiq_completion_data_kernel *completion;
1005         int insert;
1006
1007         DEBUG_INITIALISE(g_state.local);
1008
1009         insert = instance->completion_insert;
1010         while ((insert - instance->completion_remove) >= MAX_COMPLETIONS) {
1011                 /* Out of space - wait for the client */
1012                 DEBUG_TRACE(SERVICE_CALLBACK_LINE);
1013                 vchiq_log_trace(vchiq_arm_log_level, "%s - completion queue full", __func__);
1014                 DEBUG_COUNT(COMPLETION_QUEUE_FULL_COUNT);
1015                 if (wait_for_completion_interruptible(&instance->remove_event)) {
1016                         vchiq_log_info(vchiq_arm_log_level, "service_callback interrupted");
1017                         return -EAGAIN;
1018                 } else if (instance->closing) {
1019                         vchiq_log_info(vchiq_arm_log_level, "service_callback closing");
1020                         return 0;
1021                 }
1022                 DEBUG_TRACE(SERVICE_CALLBACK_LINE);
1023         }
1024
1025         completion = &instance->completions[insert & (MAX_COMPLETIONS - 1)];
1026
1027         completion->header = header;
1028         completion->reason = reason;
1029         /* N.B. service_userdata is updated while processing AWAIT_COMPLETION */
1030         completion->service_userdata = user_service->service;
1031         completion->bulk_userdata = bulk_userdata;
1032
1033         if (reason == VCHIQ_SERVICE_CLOSED) {
1034                 /*
1035                  * Take an extra reference, to be held until
1036                  * this CLOSED notification is delivered.
1037                  */
1038                 vchiq_service_get(user_service->service);
1039                 if (instance->use_close_delivered)
1040                         user_service->close_pending = 1;
1041         }
1042
1043         /*
1044          * A write barrier is needed here to ensure that the entire completion
1045          * record is written out before the insert point.
1046          */
1047         wmb();
1048
1049         if (reason == VCHIQ_MESSAGE_AVAILABLE)
1050                 user_service->message_available_pos = insert;
1051
1052         insert++;
1053         instance->completion_insert = insert;
1054
1055         complete(&instance->insert_event);
1056
1057         return 0;
1058 }
1059
1060 int
1061 service_callback(struct vchiq_instance *instance, enum vchiq_reason reason,
1062                  struct vchiq_header *header, unsigned int handle, void *bulk_userdata)
1063 {
1064         /*
1065          * How do we ensure the callback goes to the right client?
1066          * The service_user data points to a user_service record
1067          * containing the original callback and the user state structure, which
1068          * contains a circular buffer for completion records.
1069          */
1070         struct user_service *user_service;
1071         struct vchiq_service *service;
1072         bool skip_completion = false;
1073
1074         DEBUG_INITIALISE(g_state.local);
1075
1076         DEBUG_TRACE(SERVICE_CALLBACK_LINE);
1077
1078         rcu_read_lock();
1079         service = handle_to_service(instance, handle);
1080         if (WARN_ON(!service)) {
1081                 rcu_read_unlock();
1082                 return 0;
1083         }
1084
1085         user_service = (struct user_service *)service->base.userdata;
1086
1087         if (!instance || instance->closing) {
1088                 rcu_read_unlock();
1089                 return 0;
1090         }
1091
1092         /*
1093          * As we hop between different synchronization mechanisms,
1094          * taking an extra reference keeps the implementation simpler.
1095          */
1096         vchiq_service_get(service);
1097         rcu_read_unlock();
1098
1099         vchiq_log_trace(vchiq_arm_log_level,
1100                         "%s - service %lx(%d,%p), reason %d, header %lx, instance %lx, bulk_userdata %lx",
1101                         __func__, (unsigned long)user_service, service->localport,
1102                         user_service->userdata, reason, (unsigned long)header,
1103                         (unsigned long)instance, (unsigned long)bulk_userdata);
1104
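        /*
         * For VCHI-style services the header is queued on the
         * per-service msg_queue ring (msg_insert/msg_remove are
         * free-running counters masked by MSG_QUEUE_SIZE - 1) rather
         * than being delivered through the completion queue.
         */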
1105         if (header && user_service->is_vchi) {
1106                 spin_lock(&msg_queue_spinlock);
1107                 while (user_service->msg_insert ==
1108                         (user_service->msg_remove + MSG_QUEUE_SIZE)) {
1109                         spin_unlock(&msg_queue_spinlock);
1110                         DEBUG_TRACE(SERVICE_CALLBACK_LINE);
1111                         DEBUG_COUNT(MSG_QUEUE_FULL_COUNT);
1112                         vchiq_log_trace(vchiq_arm_log_level, "%s - msg queue full", __func__);
1113                         /*
1114                          * If there is no MESSAGE_AVAILABLE in the completion
1115                          * queue, add one
1116                          */
1117                         if ((user_service->message_available_pos -
1118                                 instance->completion_remove) < 0) {
1119                                 int status;
1120
1121                                 vchiq_log_info(vchiq_arm_log_level,
1122                                                "Inserting extra MESSAGE_AVAILABLE");
1123                                 DEBUG_TRACE(SERVICE_CALLBACK_LINE);
1124                                 status = add_completion(instance, reason, NULL, user_service,
1125                                                         bulk_userdata);
1126                                 if (status) {
1127                                         DEBUG_TRACE(SERVICE_CALLBACK_LINE);
1128                                         vchiq_service_put(service);
1129                                         return status;
1130                                 }
1131                         }
1132
1133                         DEBUG_TRACE(SERVICE_CALLBACK_LINE);
1134                         if (wait_for_completion_interruptible(&user_service->remove_event)) {
1135                                 vchiq_log_info(vchiq_arm_log_level, "%s interrupted", __func__);
1136                                 DEBUG_TRACE(SERVICE_CALLBACK_LINE);
1137                                 vchiq_service_put(service);
1138                                 return -EAGAIN;
1139                         } else if (instance->closing) {
1140                                 vchiq_log_info(vchiq_arm_log_level, "%s closing", __func__);
1141                                 DEBUG_TRACE(SERVICE_CALLBACK_LINE);
1142                                 vchiq_service_put(service);
1143                                 return -EINVAL;
1144                         }
1145                         DEBUG_TRACE(SERVICE_CALLBACK_LINE);
1146                         spin_lock(&msg_queue_spinlock);
1147                 }
1148
1149                 user_service->msg_queue[user_service->msg_insert &
1150                         (MSG_QUEUE_SIZE - 1)] = header;
1151                 user_service->msg_insert++;
1152
1153                 /*
1154                  * If there is a thread waiting in DEQUEUE_MESSAGE, or if
1155                  * there is a MESSAGE_AVAILABLE in the completion queue then
1156                  * bypass the completion queue.
1157                  */
1158                 if (((user_service->message_available_pos -
1159                         instance->completion_remove) >= 0) ||
1160                         user_service->dequeue_pending) {
1161                         user_service->dequeue_pending = 0;
1162                         skip_completion = true;
1163                 }
1164
1165                 spin_unlock(&msg_queue_spinlock);
1166                 complete(&user_service->insert_event);
1167
1168                 header = NULL;
1169         }
1170         DEBUG_TRACE(SERVICE_CALLBACK_LINE);
1171         vchiq_service_put(service);
1172
1173         if (skip_completion)
1174                 return 0;
1175
1176         return add_completion(instance, reason, header, user_service,
1177                 bulk_userdata);
1178 }
1179
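/*
 * Copy one line of dump output to the user buffer, honouring the read
 * offset: the first context->offset bytes of the overall dump are
 * skipped so that successive reads resume where the last one stopped.
 */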
1180 int vchiq_dump(void *dump_context, const char *str, int len)
1181 {
1182         struct dump_context *context = (struct dump_context *)dump_context;
1183         int copy_bytes;
1184
1185         if (context->actual >= context->space)
1186                 return 0;
1187
1188         if (context->offset > 0) {
1189                 int skip_bytes = min_t(int, len, context->offset);
1190
1191                 str += skip_bytes;
1192                 len -= skip_bytes;
1193                 context->offset -= skip_bytes;
1194                 if (context->offset > 0)
1195                         return 0;
1196         }
1197         copy_bytes = min_t(int, len, context->space - context->actual);
1198         if (copy_bytes == 0)
1199                 return 0;
1200         if (copy_to_user(context->buf + context->actual, str,
1201                          copy_bytes))
1202                 return -EFAULT;
1203         context->actual += copy_bytes;
1204         len -= copy_bytes;
1205
1206         /*
1207          * If the terminating NUL is included in the length, then it
1208          * marks the end of a line and should be replaced with a
1209          * newline character.
1210          */
1211         if ((len == 0) && (str[copy_bytes - 1] == '\0')) {
1212                 char cr = '\n';
1213
1214                 if (copy_to_user(context->buf + context->actual - 1,
1215                                  &cr, 1))
1216                         return -EFAULT;
1217         }
1218         return 0;
1219 }
1220
1221 int vchiq_dump_platform_instances(void *dump_context)
1222 {
1223         struct vchiq_state *state = vchiq_get_state();
1224         char buf[80];
1225         int len;
1226         int i;
1227
1228         if (!state)
1229                 return -ENOTCONN;
1230
1231         /*
1232          * There is no list of instances, so instead scan all services,
1233          * marking those that have been dumped.
1234          */
1235
1236         rcu_read_lock();
1237         for (i = 0; i < state->unused_service; i++) {
1238                 struct vchiq_service *service;
1239                 struct vchiq_instance *instance;
1240
1241                 service = rcu_dereference(state->services[i]);
1242                 if (!service || service->base.callback != service_callback)
1243                         continue;
1244
1245                 instance = service->instance;
1246                 if (instance)
1247                         instance->mark = 0;
1248         }
1249         rcu_read_unlock();
1250
1251         for (i = 0; i < state->unused_service; i++) {
1252                 struct vchiq_service *service;
1253                 struct vchiq_instance *instance;
1254                 int err;
1255
1256                 rcu_read_lock();
1257                 service = rcu_dereference(state->services[i]);
1258                 if (!service || service->base.callback != service_callback) {
1259                         rcu_read_unlock();
1260                         continue;
1261                 }
1262
1263                 instance = service->instance;
1264                 if (!instance || instance->mark) {
1265                         rcu_read_unlock();
1266                         continue;
1267                 }
1268                 rcu_read_unlock();
1269
1270                 len = snprintf(buf, sizeof(buf),
1271                                "Instance %pK: pid %d,%s completions %d/%d",
1272                                instance, instance->pid,
1273                                instance->connected ? " connected, " :
1274                                "",
1275                                instance->completion_insert -
1276                                instance->completion_remove,
1277                                MAX_COMPLETIONS);
1278                 err = vchiq_dump(dump_context, buf, len + 1);
1279                 if (err)
1280                         return err;
1281                 instance->mark = 1;
1282         }
1283         return 0;
1284 }
1285
1286 int vchiq_dump_platform_service_state(void *dump_context,
1287                                       struct vchiq_service *service)
1288 {
1289         struct user_service *user_service =
1290                         (struct user_service *)service->base.userdata;
1291         char buf[80];
1292         int len;
1293
1294         len = scnprintf(buf, sizeof(buf), "  instance %pK", service->instance);
1295
1296         if ((service->base.callback == service_callback) && user_service->is_vchi) {
1297                 len += scnprintf(buf + len, sizeof(buf) - len, ", %d/%d messages",
1298                                  user_service->msg_insert - user_service->msg_remove,
1299                                  MSG_QUEUE_SIZE);
1300
1301                 if (user_service->dequeue_pending)
1302                         len += scnprintf(buf + len, sizeof(buf) - len,
1303                                 " (dequeue pending)");
1304         }
1305
1306         return vchiq_dump(dump_context, buf, len + 1);
1307 }
1308
1309 struct vchiq_state *
1310 vchiq_get_state(void)
1311 {
1312         if (!g_state.remote) {
1313                 pr_err("%s: g_state.remote == NULL\n", __func__);
1314                 return NULL;
1315         }
1316
1317         if (g_state.remote->initialised != 1) {
1318                 pr_notice("%s: g_state.remote->initialised != 1 (%d)\n",
1319                           __func__, g_state.remote->initialised);
1320                 return NULL;
1321         }
1322
1323         return &g_state;
1324 }
1325
1326 /*
1327  * Autosuspend related functionality
1328  */
1329
1330 static int
1331 vchiq_keepalive_vchiq_callback(struct vchiq_instance *instance,
1332                                enum vchiq_reason reason,
1333                                struct vchiq_header *header,
1334                                unsigned int service_user, void *bulk_user)
1335 {
1336         vchiq_log_error(vchiq_susp_log_level, "%s callback reason %d", __func__, reason);
1337         return 0;
1338 }
1339
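/*
 * The keepalive thread mirrors use/release requests from the VideoCore
 * onto a local "KEEP" service: vchiq_on_remote_use() and
 * vchiq_on_remote_release() bump the atomic counters and signal
 * ka_evt, and this thread replays them as vchiq_use_service() and
 * vchiq_release_service() calls.
 */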
1340 static int
1341 vchiq_keepalive_thread_func(void *v)
1342 {
1343         struct vchiq_state *state = (struct vchiq_state *)v;
1344         struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);
1345
1346         int status;
1347         struct vchiq_instance *instance;
1348         unsigned int ka_handle;
1349         int ret;
1350
1351         struct vchiq_service_params_kernel params = {
1352                 .fourcc      = VCHIQ_MAKE_FOURCC('K', 'E', 'E', 'P'),
1353                 .callback    = vchiq_keepalive_vchiq_callback,
1354                 .version     = KEEPALIVE_VER,
1355                 .version_min = KEEPALIVE_VER_MIN
1356         };
1357
1358         ret = vchiq_initialise(&instance);
1359         if (ret) {
1360                 vchiq_log_error(vchiq_susp_log_level, "%s vchiq_initialise failed %d", __func__,
1361                                 ret);
1362                 goto exit;
1363         }
1364
1365         status = vchiq_connect(instance);
1366         if (status) {
1367                 vchiq_log_error(vchiq_susp_log_level, "%s vchiq_connect failed %d", __func__,
1368                                 status);
1369                 goto shutdown;
1370         }
1371
1372         status = vchiq_add_service(instance, &params, &ka_handle);
1373         if (status) {
1374                 vchiq_log_error(vchiq_susp_log_level, "%s vchiq_add_service failed %d", __func__,
1375                                 status);
1376                 goto shutdown;
1377         }
1378
1379         while (1) {
1380                 long rc = 0, uc = 0;
1381
1382                 if (wait_for_completion_interruptible(&arm_state->ka_evt)) {
1383                         vchiq_log_error(vchiq_susp_log_level, "%s interrupted", __func__);
1384                         flush_signals(current);
1385                         continue;
1386                 }
1387
1388                 /*
1389                  * read and clear counters.  Do release_count then use_count to
1390                  * prevent getting more releases than uses
1391                  */
1392                 rc = atomic_xchg(&arm_state->ka_release_count, 0);
1393                 uc = atomic_xchg(&arm_state->ka_use_count, 0);
1394
1395                 /*
1396                  * Call use/release service the requisite number of times.
1397                  * Process use before release so use counts don't go negative
1398                  */
1399                 while (uc--) {
1400                         atomic_inc(&arm_state->ka_use_ack_count);
1401                         status = vchiq_use_service(instance, ka_handle);
1402                         if (status) {
1403                                 vchiq_log_error(vchiq_susp_log_level,
1404                                                 "%s vchiq_use_service error %d", __func__, status);
1405                         }
1406                 }
1407                 while (rc--) {
1408                         status = vchiq_release_service(instance, ka_handle);
1409                         if (status) {
1410                                 vchiq_log_error(vchiq_susp_log_level,
1411                                                 "%s vchiq_release_service error %d", __func__,
1412                                                 status);
1413                         }
1414                 }
1415         }
1416
1417 shutdown:
1418         vchiq_shutdown(instance);
1419 exit:
1420         return 0;
1421 }
1422
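/*
 * Take a use count on the VideoCore, either for a specific service or
 * on behalf of the peer (USE_TYPE_VCHIQ).  Both the per-entity count
 * and the global videocore_use_count are bumped under susp_res_lock,
 * and any keepalive use-acks accumulated meanwhile are forwarded via
 * vchiq_send_remote_use_active().
 */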
int
vchiq_use_internal(struct vchiq_state *state, struct vchiq_service *service,
                   enum USE_TYPE_E use_type)
{
        struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);
        int ret = 0;
        char entity[16];
        int *entity_uc;
        int local_uc;

        if (!arm_state) {
                ret = -EINVAL;
                goto out;
        }

        if (use_type == USE_TYPE_VCHIQ) {
                sprintf(entity, "VCHIQ:   ");
                entity_uc = &arm_state->peer_use_count;
        } else if (service) {
                sprintf(entity, "%c%c%c%c:%03d",
                        VCHIQ_FOURCC_AS_4CHARS(service->base.fourcc),
                        service->client_id);
                entity_uc = &service->service_use_count;
        } else {
                vchiq_log_error(vchiq_susp_log_level, "%s null service ptr", __func__);
                ret = -EINVAL;
                goto out;
        }

        write_lock_bh(&arm_state->susp_res_lock);
        local_uc = ++arm_state->videocore_use_count;
        ++(*entity_uc);

        vchiq_log_trace(vchiq_susp_log_level, "%s %s count %d, state count %d", __func__, entity,
                        *entity_uc, local_uc);

        write_unlock_bh(&arm_state->susp_res_lock);

        if (!ret) {
                int status = 0;
                long ack_cnt = atomic_xchg(&arm_state->ka_use_ack_count, 0);

                while (ack_cnt && !status) {
                        /* Send the use notify to videocore */
                        status = vchiq_send_remote_use_active(state);
                        if (!status)
                                ack_cnt--;
                        else
                                atomic_add(ack_cnt, &arm_state->ka_use_ack_count);
                }
        }

out:
        vchiq_log_trace(vchiq_susp_log_level, "%s exit %d", __func__, ret);
        return ret;
}

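/*
 * Drop a use count on the videocore and on the given entity (the given
 * service, or the peer when service is NULL). Underflows are reported
 * with WARN_ON rather than BUG_ON so a misbehaving user thread cannot
 * crash the kernel.
 */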
int
vchiq_release_internal(struct vchiq_state *state, struct vchiq_service *service)
{
        struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);
        int ret = 0;
        char entity[16];
        int *entity_uc;

        if (!arm_state) {
                ret = -EINVAL;
                goto out;
        }

        if (service) {
                sprintf(entity, "%c%c%c%c:%03d",
                        VCHIQ_FOURCC_AS_4CHARS(service->base.fourcc),
                        service->client_id);
                entity_uc = &service->service_use_count;
        } else {
                sprintf(entity, "PEER:   ");
                entity_uc = &arm_state->peer_use_count;
        }

        write_lock_bh(&arm_state->susp_res_lock);
        if (!arm_state->videocore_use_count || !(*entity_uc)) {
                /* Don't use BUG_ON - don't allow user thread to crash kernel */
                WARN_ON(!arm_state->videocore_use_count);
                WARN_ON(!(*entity_uc));
                ret = -EINVAL;
                goto unlock;
        }
        --arm_state->videocore_use_count;
        --(*entity_uc);

        vchiq_log_trace(vchiq_susp_log_level, "%s %s count %d, state count %d", __func__, entity,
                        *entity_uc, arm_state->videocore_use_count);

unlock:
        write_unlock_bh(&arm_state->susp_res_lock);

out:
        vchiq_log_trace(vchiq_susp_log_level, "%s exit %d", __func__, ret);
        return ret;
}

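/*
 * Called when the videocore peer requests a use count. The request is
 * recorded in ka_use_count and handled by the keepalive thread.
 */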
void
vchiq_on_remote_use(struct vchiq_state *state)
{
        struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);

        atomic_inc(&arm_state->ka_use_count);
        complete(&arm_state->ka_evt);
}

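/*
 * Called when the videocore peer releases a use count. The request is
 * recorded in ka_release_count and handled by the keepalive thread.
 */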
void
vchiq_on_remote_release(struct vchiq_state *state)
{
        struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);

        atomic_inc(&arm_state->ka_release_count);
        complete(&arm_state->ka_evt);
}

int
vchiq_use_service_internal(struct vchiq_service *service)
{
        return vchiq_use_internal(service->state, service, USE_TYPE_SERVICE);
}

int
vchiq_release_service_internal(struct vchiq_service *service)
{
        return vchiq_release_internal(service->state, service);
}

struct vchiq_debugfs_node *
vchiq_instance_get_debugfs_node(struct vchiq_instance *instance)
{
        return &instance->debugfs_node;
}

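/*
 * Sum the use counts of every service belonging to this instance,
 * walking the service list under the RCU read lock.
 */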
int
vchiq_instance_get_use_count(struct vchiq_instance *instance)
{
        struct vchiq_service *service;
        int use_count = 0, i;

        i = 0;
        rcu_read_lock();
        while ((service = __next_service_by_instance(instance->state,
                                                     instance, &i)))
                use_count += service->service_use_count;
        rcu_read_unlock();
        return use_count;
}

int
vchiq_instance_get_pid(struct vchiq_instance *instance)
{
        return instance->pid;
}

int
vchiq_instance_get_trace(struct vchiq_instance *instance)
{
        return instance->trace;
}

void
vchiq_instance_set_trace(struct vchiq_instance *instance, int trace)
{
        struct vchiq_service *service;
        int i;

        i = 0;
        rcu_read_lock();
        while ((service = __next_service_by_instance(instance->state,
                                                     instance, &i)))
                service->trace = trace;
        rcu_read_unlock();
        instance->trace = (trace != 0);
}

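/*
 * Take a use count on the service identified by handle; exported for
 * VCHIQ client drivers.
 */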
int
vchiq_use_service(struct vchiq_instance *instance, unsigned int handle)
{
        int ret = -EINVAL;
        struct vchiq_service *service = find_service_by_handle(instance, handle);

        if (service) {
                ret = vchiq_use_internal(service->state, service, USE_TYPE_SERVICE);
                vchiq_service_put(service);
        }
        return ret;
}
EXPORT_SYMBOL(vchiq_use_service);

int
vchiq_release_service(struct vchiq_instance *instance, unsigned int handle)
{
        int ret = -EINVAL;
        struct vchiq_service *service = find_service_by_handle(instance, handle);

        if (service) {
                ret = vchiq_release_internal(service->state, service);
                vchiq_service_put(service);
        }
        return ret;
}
EXPORT_SYMBOL(vchiq_release_service);

struct service_data_struct {
        int fourcc;
        int clientid;
        int use_count;
};

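/*
 * Log the use count of every active service, plus the peer and overall
 * videocore use counts, flagging any non-zero count that is preventing
 * suspend. The counts are snapshotted under the lock and logged after
 * it has been dropped.
 */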
void
vchiq_dump_service_use_state(struct vchiq_state *state)
{
        struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);
        struct service_data_struct *service_data;
        int i, found = 0;
        /*
         * If there's more than 64 services, only dump ones with
         * non-zero counts
         */
        int only_nonzero = 0;
        static const char *nz = "<-- preventing suspend";

        int peer_count;
        int vc_use_count;
        int active_services;

        if (!arm_state)
                return;

        service_data = kmalloc_array(MAX_SERVICES, sizeof(*service_data),
                                     GFP_KERNEL);
        if (!service_data)
                return;

        read_lock_bh(&arm_state->susp_res_lock);
        peer_count = arm_state->peer_use_count;
        vc_use_count = arm_state->videocore_use_count;
        active_services = state->unused_service;
        if (active_services > MAX_SERVICES)
                only_nonzero = 1;

        rcu_read_lock();
        for (i = 0; i < active_services; i++) {
                struct vchiq_service *service_ptr =
                        rcu_dereference(state->services[i]);

                if (!service_ptr)
                        continue;

                if (only_nonzero && !service_ptr->service_use_count)
                        continue;

                if (service_ptr->srvstate == VCHIQ_SRVSTATE_FREE)
                        continue;

                service_data[found].fourcc = service_ptr->base.fourcc;
                service_data[found].clientid = service_ptr->client_id;
                service_data[found].use_count = service_ptr->service_use_count;
                found++;
                if (found >= MAX_SERVICES)
                        break;
        }
        rcu_read_unlock();

        read_unlock_bh(&arm_state->susp_res_lock);

        if (only_nonzero)
                vchiq_log_warning(vchiq_susp_log_level, "Too many active services (%d). Only dumping up to first %d services with non-zero use-count",
                                  active_services, found);

        for (i = 0; i < found; i++) {
                vchiq_log_warning(vchiq_susp_log_level, "----- %c%c%c%c:%d service count %d %s",
                                  VCHIQ_FOURCC_AS_4CHARS(service_data[i].fourcc),
                                  service_data[i].clientid, service_data[i].use_count,
                                  service_data[i].use_count ? nz : "");
        }
        vchiq_log_warning(vchiq_susp_log_level, "----- VCHIQ use count %d", peer_count);
        vchiq_log_warning(vchiq_susp_log_level, "--- Overall vchiq instance use count %d",
                          vc_use_count);

        kfree(service_data);
}

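/*
 * Return 0 if the service currently holds a use count; otherwise log
 * the use-count state and return -EINVAL.
 */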
int
vchiq_check_service(struct vchiq_service *service)
{
        struct vchiq_arm_state *arm_state;
        int ret = -EINVAL;

        if (!service || !service->state)
                goto out;

        arm_state = vchiq_platform_get_arm_state(service->state);

        read_lock_bh(&arm_state->susp_res_lock);
        if (service->service_use_count)
                ret = 0;
        read_unlock_bh(&arm_state->susp_res_lock);

        if (ret) {
                vchiq_log_error(vchiq_susp_log_level,
                                "%s ERROR - %c%c%c%c:%d service count %d, state count %d", __func__,
                                VCHIQ_FOURCC_AS_4CHARS(service->base.fourcc), service->client_id,
                                service->service_use_count, arm_state->videocore_use_count);
                vchiq_dump_service_use_state(service->state);
        }
out:
        return ret;
}

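/*
 * On the first transition to CONNECTED, start the keepalive thread
 * that services use/release requests from the videocore peer.
 */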
void vchiq_platform_conn_state_changed(struct vchiq_state *state,
                                       enum vchiq_connstate oldstate,
                                       enum vchiq_connstate newstate)
{
        struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);
        char threadname[16];

        vchiq_log_info(vchiq_susp_log_level, "%d: %s->%s", state->id,
                       get_conn_state_name(oldstate), get_conn_state_name(newstate));
        if (state->conn_state != VCHIQ_CONNSTATE_CONNECTED)
                return;

        write_lock_bh(&arm_state->susp_res_lock);
        if (arm_state->first_connect) {
                write_unlock_bh(&arm_state->susp_res_lock);
                return;
        }

        arm_state->first_connect = 1;
        write_unlock_bh(&arm_state->susp_res_lock);
        snprintf(threadname, sizeof(threadname), "vchiq-keep/%d",
                 state->id);
        arm_state->ka_thread = kthread_create(&vchiq_keepalive_thread_func,
                                              (void *)state,
                                              threadname);
        if (IS_ERR(arm_state->ka_thread)) {
                vchiq_log_error(vchiq_susp_log_level,
                                "vchiq: FATAL: couldn't create thread %s",
                                threadname);
        } else {
                wake_up_process(arm_state->ka_thread);
        }
}

static const struct of_device_id vchiq_of_match[] = {
        { .compatible = "brcm,bcm2835-vchiq", .data = &bcm2835_drvdata },
        { .compatible = "brcm,bcm2836-vchiq", .data = &bcm2836_drvdata },
        {},
};
MODULE_DEVICE_TABLE(of, vchiq_of_match);

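/*
 * Match the SoC-specific driver data, resolve the firmware interface
 * (deferring probe until the firmware driver is ready), then bring up
 * the platform state, debugfs, the character device and the child
 * devices.
 */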
static int vchiq_probe(struct platform_device *pdev)
{
        struct device_node *fw_node;
        const struct of_device_id *of_id;
        struct vchiq_drvdata *drvdata;
        int err;

        of_id = of_match_node(vchiq_of_match, pdev->dev.of_node);
        drvdata = (struct vchiq_drvdata *)of_id->data;
        if (!drvdata)
                return -EINVAL;

        fw_node = of_find_compatible_node(NULL, NULL,
                                          "raspberrypi,bcm2835-firmware");
        if (!fw_node) {
                dev_err(&pdev->dev, "Missing firmware node\n");
                return -ENOENT;
        }

        drvdata->fw = devm_rpi_firmware_get(&pdev->dev, fw_node);
        of_node_put(fw_node);
        if (!drvdata->fw)
                return -EPROBE_DEFER;

        platform_set_drvdata(pdev, drvdata);

        err = vchiq_platform_init(pdev, &g_state);
        if (err)
                goto failed_platform_init;

        vchiq_debugfs_init();

        vchiq_log_info(vchiq_arm_log_level,
                       "vchiq: platform initialised - version %d (min %d)",
                       VCHIQ_VERSION, VCHIQ_VERSION_MIN);

        /*
         * Simply exit on error since the function handles cleanup in
         * cases of failure.
         */
        err = vchiq_register_chrdev(&pdev->dev);
        if (err) {
                vchiq_log_warning(vchiq_arm_log_level,
                                  "Failed to initialize vchiq cdev");
                goto error_exit;
        }

        bcm2835_audio = vchiq_device_register(&pdev->dev, "bcm2835-audio");
        bcm2835_camera = vchiq_device_register(&pdev->dev, "bcm2835-camera");

        return 0;

failed_platform_init:
        vchiq_log_warning(vchiq_arm_log_level, "could not initialize vchiq platform");
error_exit:
        return err;
}

static void vchiq_remove(struct platform_device *pdev)
{
        vchiq_device_unregister(bcm2835_audio);
        vchiq_device_unregister(bcm2835_camera);
        vchiq_debugfs_deinit();
        vchiq_deregister_chrdev();
}

static struct platform_driver vchiq_driver = {
        .driver = {
                .name = "bcm2835_vchiq",
                .of_match_table = vchiq_of_match,
        },
        .probe = vchiq_probe,
        .remove_new = vchiq_remove,
};

static int __init vchiq_driver_init(void)
{
        int ret;

        ret = bus_register(&vchiq_bus_type);
        if (ret) {
                pr_err("Failed to register %s\n", vchiq_bus_type.name);
                return ret;
        }

        ret = platform_driver_register(&vchiq_driver);
        if (ret) {
                pr_err("Failed to register vchiq driver\n");
                bus_unregister(&vchiq_bus_type);
        }

        return ret;
}
module_init(vchiq_driver_init);

static void __exit vchiq_driver_exit(void)
{
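        /*
         * Reverse of vchiq_driver_init(): unregister the platform driver
         * before the bus that its devices live on.
         */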
        platform_driver_unregister(&vchiq_driver);
        bus_unregister(&vchiq_bus_type);
}
module_exit(vchiq_driver_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("Videocore VCHIQ driver");
MODULE_AUTHOR("Broadcom Corporation");