drivers/irqchip/irq-gic-v3-its.c
1 /*
2  * Copyright (C) 2013-2017 ARM Limited, All Rights Reserved.
3  * Author: Marc Zyngier <marc.zyngier@arm.com>
4  *
5  * This program is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License version 2 as
7  * published by the Free Software Foundation.
8  *
9  * This program is distributed in the hope that it will be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12  * GNU General Public License for more details.
13  *
14  * You should have received a copy of the GNU General Public License
15  * along with this program.  If not, see <http://www.gnu.org/licenses/>.
16  */
17
18 #include <linux/acpi.h>
19 #include <linux/acpi_iort.h>
20 #include <linux/bitmap.h>
21 #include <linux/cpu.h>
22 #include <linux/delay.h>
23 #include <linux/dma-iommu.h>
24 #include <linux/interrupt.h>
25 #include <linux/irqdomain.h>
26 #include <linux/log2.h>
27 #include <linux/mm.h>
28 #include <linux/msi.h>
29 #include <linux/of.h>
30 #include <linux/of_address.h>
31 #include <linux/of_irq.h>
32 #include <linux/of_pci.h>
33 #include <linux/of_platform.h>
34 #include <linux/percpu.h>
35 #include <linux/slab.h>
36
37 #include <linux/irqchip.h>
38 #include <linux/irqchip/arm-gic-v3.h>
39 #include <linux/irqchip/arm-gic-v4.h>
40
41 #include <asm/cputype.h>
42 #include <asm/exception.h>
43
44 #include "irq-gic-common.h"
45
46 #define ITS_FLAGS_CMDQ_NEEDS_FLUSHING           (1ULL << 0)
47 #define ITS_FLAGS_WORKAROUND_CAVIUM_22375       (1ULL << 1)
48 #define ITS_FLAGS_WORKAROUND_CAVIUM_23144       (1ULL << 2)
49
50 #define RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING     (1 << 0)
51
52 static u32 lpi_id_bits;
53
54 /*
55  * We allocate memory for PROPBASE to cover 2 ^ lpi_id_bits LPIs to
56  * deal with (one configuration byte per interrupt). PENDBASE has to
57  * be 64kB aligned (one bit per LPI, plus 8192 bits for SPI/PPI/SGI).
58  */
59 #define LPI_NRBITS              lpi_id_bits
60 #define LPI_PROPBASE_SZ         ALIGN(BIT(LPI_NRBITS), SZ_64K)
61 #define LPI_PENDBASE_SZ         ALIGN(BIT(LPI_NRBITS) / 8, SZ_64K)
62
63 #define LPI_PROP_DEFAULT_PRIO   0xa0
64
65 /*
66  * Collection structure - just an ID, and a redistributor address to
67  * ping. We use one per CPU as a bag of interrupts assigned to this
68  * CPU.
69  */
70 struct its_collection {
71         u64                     target_address;
72         u16                     col_id;
73 };
74
75 /*
76  * The ITS_BASER structure - contains memory information, cached
77  * value of BASER register configuration and ITS page size.
78  */
79 struct its_baser {
80         void            *base;
81         u64             val;
82         u32             order;
83         u32             psz;
84 };
85
86 /*
87  * The ITS structure - contains most of the infrastructure, with the
88  * top-level MSI domain, the command queue, the collections, and the
89  * list of devices writing to it.
90  */
91 struct its_node {
92         raw_spinlock_t          lock;
93         struct list_head        entry;
94         void __iomem            *base;
95         phys_addr_t             phys_base;
96         struct its_cmd_block    *cmd_base;
97         struct its_cmd_block    *cmd_write;
98         struct its_baser        tables[GITS_BASER_NR_REGS];
99         struct its_collection   *collections;
100         struct list_head        its_device_list;
101         u64                     flags;
102         u32                     ite_size;
103         u32                     device_ids;
104         int                     numa_node;
105         bool                    is_v4;
106 };
107
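/* MAPD encodes the ITT address from bit 8 up, so the ITT must be 256 byte aligned */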
108 #define ITS_ITT_ALIGN           SZ_256
109
110 /* Convert page order to size in bytes */
111 #define PAGE_ORDER_TO_SIZE(o)   (PAGE_SIZE << (o))
112
113 struct event_lpi_map {
114         unsigned long           *lpi_map;
115         u16                     *col_map;
116         irq_hw_number_t         lpi_base;
117         int                     nr_lpis;
118         struct mutex            vlpi_lock;
119         struct its_vm           *vm;
120         struct its_vlpi_map     *vlpi_maps;
121         int                     nr_vlpis;
122 };
123
124 /*
125  * The ITS view of a device - belongs to an ITS, owns an interrupt
126  * translation table, and a list of interrupts.  If some of its
127  * LPIs are injected into a guest (GICv4), the event_map.vm field
128  * indicates which one.
129  */
130 struct its_device {
131         struct list_head        entry;
132         struct its_node         *its;
133         struct event_lpi_map    event_map;
134         void                    *itt;
135         u32                     nr_ites;
136         u32                     device_id;
137 };
138
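/*
 * GICv4 doorbell proxy: a fake ITS device whose events are dynamically
 * mapped to vPE doorbell LPIs, so that doorbells can be driven through
 * regular ITS commands when the redistributors lack DirectLPI support.
 * 'next_victim' implements a simple round-robin recycling of proxy slots.
 */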
139 static struct {
140         raw_spinlock_t          lock;
141         struct its_device       *dev;
142         struct its_vpe          **vpes;
143         int                     next_victim;
144 } vpe_proxy;
145
146 static LIST_HEAD(its_nodes);
147 static DEFINE_SPINLOCK(its_lock);
148 static struct rdists *gic_rdists;
149 static struct irq_domain *its_parent;
150
151 /*
152  * We have a maximum number of 16 ITSs in the whole system if we're
153  * using the ITSList mechanism
154  */
155 #define ITS_LIST_MAX            16
156
157 static unsigned long its_list_map;
158 static u16 vmovp_seq_num;
159 static DEFINE_RAW_SPINLOCK(vmovp_lock);
160
161 static DEFINE_IDA(its_vpeid_ida);
162
163 #define gic_data_rdist()                (raw_cpu_ptr(gic_rdists->rdist))
164 #define gic_data_rdist_rd_base()        (gic_data_rdist()->rd_base)
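/* GICv4: the VLPI register frame sits two 64kB pages above RD_base */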
165 #define gic_data_rdist_vlpi_base()      (gic_data_rdist_rd_base() + SZ_128K)
166
167 static struct its_collection *dev_event_to_col(struct its_device *its_dev,
168                                                u32 event)
169 {
170         struct its_node *its = its_dev->its;
171
172         return its->collections + its_dev->event_map.col_map[event];
173 }
174
175 /*
176  * ITS command descriptors - parameters to be encoded in a command
177  * block.
178  */
179 struct its_cmd_desc {
180         union {
181                 struct {
182                         struct its_device *dev;
183                         u32 event_id;
184                 } its_inv_cmd;
185
186                 struct {
187                         struct its_device *dev;
188                         u32 event_id;
189                 } its_clear_cmd;
190
191                 struct {
192                         struct its_device *dev;
193                         u32 event_id;
194                 } its_int_cmd;
195
196                 struct {
197                         struct its_device *dev;
198                         int valid;
199                 } its_mapd_cmd;
200
201                 struct {
202                         struct its_collection *col;
203                         int valid;
204                 } its_mapc_cmd;
205
206                 struct {
207                         struct its_device *dev;
208                         u32 phys_id;
209                         u32 event_id;
210                 } its_mapti_cmd;
211
212                 struct {
213                         struct its_device *dev;
214                         struct its_collection *col;
215                         u32 event_id;
216                 } its_movi_cmd;
217
218                 struct {
219                         struct its_device *dev;
220                         u32 event_id;
221                 } its_discard_cmd;
222
223                 struct {
224                         struct its_collection *col;
225                 } its_invall_cmd;
226
227                 struct {
228                         struct its_vpe *vpe;
229                 } its_vinvall_cmd;
230
231                 struct {
232                         struct its_vpe *vpe;
233                         struct its_collection *col;
234                         bool valid;
235                 } its_vmapp_cmd;
236
237                 struct {
238                         struct its_vpe *vpe;
239                         struct its_device *dev;
240                         u32 virt_id;
241                         u32 event_id;
242                         bool db_enabled;
243                 } its_vmapti_cmd;
244
245                 struct {
246                         struct its_vpe *vpe;
247                         struct its_device *dev;
248                         u32 event_id;
249                         bool db_enabled;
250                 } its_vmovi_cmd;
251
252                 struct {
253                         struct its_vpe *vpe;
254                         struct its_collection *col;
255                         u16 seq_num;
256                         u16 its_list;
257                 } its_vmovp_cmd;
258         };
259 };
260
261 /*
262  * The ITS command block, which is what the ITS actually parses.
263  */
264 struct its_cmd_block {
265         u64     raw_cmd[4];
266 };
267
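/* Each command is four 64bit words (32 bytes), so a 64kB queue holds 2048 entries */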
268 #define ITS_CMD_QUEUE_SZ                SZ_64K
269 #define ITS_CMD_QUEUE_NR_ENTRIES        (ITS_CMD_QUEUE_SZ / sizeof(struct its_cmd_block))
270
271 typedef struct its_collection *(*its_cmd_builder_t)(struct its_cmd_block *,
272                                                     struct its_cmd_desc *);
273
274 typedef struct its_vpe *(*its_cmd_vbuilder_t)(struct its_cmd_block *,
275                                               struct its_cmd_desc *);
276
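/* Poke 'val' into bits [h:l] of a command doubleword, clearing the field first */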
277 static void its_mask_encode(u64 *raw_cmd, u64 val, int h, int l)
278 {
279         u64 mask = GENMASK_ULL(h, l);
280         *raw_cmd &= ~mask;
281         *raw_cmd |= (val << l) & mask;
282 }
283
284 static void its_encode_cmd(struct its_cmd_block *cmd, u8 cmd_nr)
285 {
286         its_mask_encode(&cmd->raw_cmd[0], cmd_nr, 7, 0);
287 }
288
289 static void its_encode_devid(struct its_cmd_block *cmd, u32 devid)
290 {
291         its_mask_encode(&cmd->raw_cmd[0], devid, 63, 32);
292 }
293
294 static void its_encode_event_id(struct its_cmd_block *cmd, u32 id)
295 {
296         its_mask_encode(&cmd->raw_cmd[1], id, 31, 0);
297 }
298
299 static void its_encode_phys_id(struct its_cmd_block *cmd, u32 phys_id)
300 {
301         its_mask_encode(&cmd->raw_cmd[1], phys_id, 63, 32);
302 }
303
304 static void its_encode_size(struct its_cmd_block *cmd, u8 size)
305 {
306         its_mask_encode(&cmd->raw_cmd[1], size, 4, 0);
307 }
308
309 static void its_encode_itt(struct its_cmd_block *cmd, u64 itt_addr)
310 {
311         its_mask_encode(&cmd->raw_cmd[2], itt_addr >> 8, 50, 8);
312 }
313
314 static void its_encode_valid(struct its_cmd_block *cmd, int valid)
315 {
316         its_mask_encode(&cmd->raw_cmd[2], !!valid, 63, 63);
317 }
318
319 static void its_encode_target(struct its_cmd_block *cmd, u64 target_addr)
320 {
321         its_mask_encode(&cmd->raw_cmd[2], target_addr >> 16, 50, 16);
322 }
323
324 static void its_encode_collection(struct its_cmd_block *cmd, u16 col)
325 {
326         its_mask_encode(&cmd->raw_cmd[2], col, 15, 0);
327 }
328
329 static void its_encode_vpeid(struct its_cmd_block *cmd, u16 vpeid)
330 {
331         its_mask_encode(&cmd->raw_cmd[1], vpeid, 47, 32);
332 }
333
334 static void its_encode_virt_id(struct its_cmd_block *cmd, u32 virt_id)
335 {
336         its_mask_encode(&cmd->raw_cmd[2], virt_id, 31, 0);
337 }
338
339 static void its_encode_db_phys_id(struct its_cmd_block *cmd, u32 db_phys_id)
340 {
341         its_mask_encode(&cmd->raw_cmd[2], db_phys_id, 63, 32);
342 }
343
344 static void its_encode_db_valid(struct its_cmd_block *cmd, bool db_valid)
345 {
346         its_mask_encode(&cmd->raw_cmd[2], db_valid, 0, 0);
347 }
348
349 static void its_encode_seq_num(struct its_cmd_block *cmd, u16 seq_num)
350 {
351         its_mask_encode(&cmd->raw_cmd[0], seq_num, 47, 32);
352 }
353
354 static void its_encode_its_list(struct its_cmd_block *cmd, u16 its_list)
355 {
356         its_mask_encode(&cmd->raw_cmd[1], its_list, 15, 0);
357 }
358
359 static void its_encode_vpt_addr(struct its_cmd_block *cmd, u64 vpt_pa)
360 {
361         its_mask_encode(&cmd->raw_cmd[3], vpt_pa >> 16, 50, 16);
362 }
363
364 static void its_encode_vpt_size(struct its_cmd_block *cmd, u8 vpt_size)
365 {
366         its_mask_encode(&cmd->raw_cmd[3], vpt_size, 4, 0);
367 }
368
369 static inline void its_fixup_cmd(struct its_cmd_block *cmd)
370 {
371         /* Let's fixup BE commands */
372         cmd->raw_cmd[0] = cpu_to_le64(cmd->raw_cmd[0]);
373         cmd->raw_cmd[1] = cpu_to_le64(cmd->raw_cmd[1]);
374         cmd->raw_cmd[2] = cpu_to_le64(cmd->raw_cmd[2]);
375         cmd->raw_cmd[3] = cpu_to_le64(cmd->raw_cmd[3]);
376 }
377
378 static struct its_collection *its_build_mapd_cmd(struct its_cmd_block *cmd,
379                                                  struct its_cmd_desc *desc)
380 {
381         unsigned long itt_addr;
382         u8 size = ilog2(desc->its_mapd_cmd.dev->nr_ites);
383
384         itt_addr = virt_to_phys(desc->its_mapd_cmd.dev->itt);
385         itt_addr = ALIGN(itt_addr, ITS_ITT_ALIGN);
386
387         its_encode_cmd(cmd, GITS_CMD_MAPD);
388         its_encode_devid(cmd, desc->its_mapd_cmd.dev->device_id);
389         its_encode_size(cmd, size - 1);
390         its_encode_itt(cmd, itt_addr);
391         its_encode_valid(cmd, desc->its_mapd_cmd.valid);
392
393         its_fixup_cmd(cmd);
394
395         return NULL;
396 }
397
398 static struct its_collection *its_build_mapc_cmd(struct its_cmd_block *cmd,
399                                                  struct its_cmd_desc *desc)
400 {
401         its_encode_cmd(cmd, GITS_CMD_MAPC);
402         its_encode_collection(cmd, desc->its_mapc_cmd.col->col_id);
403         its_encode_target(cmd, desc->its_mapc_cmd.col->target_address);
404         its_encode_valid(cmd, desc->its_mapc_cmd.valid);
405
406         its_fixup_cmd(cmd);
407
408         return desc->its_mapc_cmd.col;
409 }
410
411 static struct its_collection *its_build_mapti_cmd(struct its_cmd_block *cmd,
412                                                   struct its_cmd_desc *desc)
413 {
414         struct its_collection *col;
415
416         col = dev_event_to_col(desc->its_mapti_cmd.dev,
417                                desc->its_mapti_cmd.event_id);
418
419         its_encode_cmd(cmd, GITS_CMD_MAPTI);
420         its_encode_devid(cmd, desc->its_mapti_cmd.dev->device_id);
421         its_encode_event_id(cmd, desc->its_mapti_cmd.event_id);
422         its_encode_phys_id(cmd, desc->its_mapti_cmd.phys_id);
423         its_encode_collection(cmd, col->col_id);
424
425         its_fixup_cmd(cmd);
426
427         return col;
428 }
429
430 static struct its_collection *its_build_movi_cmd(struct its_cmd_block *cmd,
431                                                  struct its_cmd_desc *desc)
432 {
433         struct its_collection *col;
434
435         col = dev_event_to_col(desc->its_movi_cmd.dev,
436                                desc->its_movi_cmd.event_id);
437
438         its_encode_cmd(cmd, GITS_CMD_MOVI);
439         its_encode_devid(cmd, desc->its_movi_cmd.dev->device_id);
440         its_encode_event_id(cmd, desc->its_movi_cmd.event_id);
441         its_encode_collection(cmd, desc->its_movi_cmd.col->col_id);
442
443         its_fixup_cmd(cmd);
444
445         return col;
446 }
447
448 static struct its_collection *its_build_discard_cmd(struct its_cmd_block *cmd,
449                                                     struct its_cmd_desc *desc)
450 {
451         struct its_collection *col;
452
453         col = dev_event_to_col(desc->its_discard_cmd.dev,
454                                desc->its_discard_cmd.event_id);
455
456         its_encode_cmd(cmd, GITS_CMD_DISCARD);
457         its_encode_devid(cmd, desc->its_discard_cmd.dev->device_id);
458         its_encode_event_id(cmd, desc->its_discard_cmd.event_id);
459
460         its_fixup_cmd(cmd);
461
462         return col;
463 }
464
465 static struct its_collection *its_build_inv_cmd(struct its_cmd_block *cmd,
466                                                 struct its_cmd_desc *desc)
467 {
468         struct its_collection *col;
469
470         col = dev_event_to_col(desc->its_inv_cmd.dev,
471                                desc->its_inv_cmd.event_id);
472
473         its_encode_cmd(cmd, GITS_CMD_INV);
474         its_encode_devid(cmd, desc->its_inv_cmd.dev->device_id);
475         its_encode_event_id(cmd, desc->its_inv_cmd.event_id);
476
477         its_fixup_cmd(cmd);
478
479         return col;
480 }
481
482 static struct its_collection *its_build_int_cmd(struct its_cmd_block *cmd,
483                                                 struct its_cmd_desc *desc)
484 {
485         struct its_collection *col;
486
487         col = dev_event_to_col(desc->its_int_cmd.dev,
488                                desc->its_int_cmd.event_id);
489
490         its_encode_cmd(cmd, GITS_CMD_INT);
491         its_encode_devid(cmd, desc->its_int_cmd.dev->device_id);
492         its_encode_event_id(cmd, desc->its_int_cmd.event_id);
493
494         its_fixup_cmd(cmd);
495
496         return col;
497 }
498
499 static struct its_collection *its_build_clear_cmd(struct its_cmd_block *cmd,
500                                                   struct its_cmd_desc *desc)
501 {
502         struct its_collection *col;
503
504         col = dev_event_to_col(desc->its_clear_cmd.dev,
505                                desc->its_clear_cmd.event_id);
506
507         its_encode_cmd(cmd, GITS_CMD_CLEAR);
508         its_encode_devid(cmd, desc->its_clear_cmd.dev->device_id);
509         its_encode_event_id(cmd, desc->its_clear_cmd.event_id);
510
511         its_fixup_cmd(cmd);
512
513         return col;
514 }
515
516 static struct its_collection *its_build_invall_cmd(struct its_cmd_block *cmd,
517                                                    struct its_cmd_desc *desc)
518 {
519         its_encode_cmd(cmd, GITS_CMD_INVALL);
520         its_encode_collection(cmd, desc->its_mapc_cmd.col->col_id);
521
522         its_fixup_cmd(cmd);
523
524         return NULL;
525 }
526
527 static struct its_vpe *its_build_vinvall_cmd(struct its_cmd_block *cmd,
528                                              struct its_cmd_desc *desc)
529 {
530         its_encode_cmd(cmd, GITS_CMD_VINVALL);
531         its_encode_vpeid(cmd, desc->its_vinvall_cmd.vpe->vpe_id);
532
533         its_fixup_cmd(cmd);
534
535         return desc->its_vinvall_cmd.vpe;
536 }
537
538 static struct its_vpe *its_build_vmapp_cmd(struct its_cmd_block *cmd,
539                                            struct its_cmd_desc *desc)
540 {
541         unsigned long vpt_addr;
542
543         vpt_addr = virt_to_phys(page_address(desc->its_vmapp_cmd.vpe->vpt_page));
544
545         its_encode_cmd(cmd, GITS_CMD_VMAPP);
546         its_encode_vpeid(cmd, desc->its_vmapp_cmd.vpe->vpe_id);
547         its_encode_valid(cmd, desc->its_vmapp_cmd.valid);
548         its_encode_target(cmd, desc->its_vmapp_cmd.col->target_address);
549         its_encode_vpt_addr(cmd, vpt_addr);
550         its_encode_vpt_size(cmd, LPI_NRBITS - 1);
551
552         its_fixup_cmd(cmd);
553
554         return desc->its_vmapp_cmd.vpe;
555 }
556
557 static struct its_vpe *its_build_vmapti_cmd(struct its_cmd_block *cmd,
558                                             struct its_cmd_desc *desc)
559 {
560         u32 db;
561
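        /* INTID 1023 is the "spurious" ID; it is used here to mean "no doorbell" */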
562         if (desc->its_vmapti_cmd.db_enabled)
563                 db = desc->its_vmapti_cmd.vpe->vpe_db_lpi;
564         else
565                 db = 1023;
566
567         its_encode_cmd(cmd, GITS_CMD_VMAPTI);
568         its_encode_devid(cmd, desc->its_vmapti_cmd.dev->device_id);
569         its_encode_vpeid(cmd, desc->its_vmapti_cmd.vpe->vpe_id);
570         its_encode_event_id(cmd, desc->its_vmapti_cmd.event_id);
571         its_encode_db_phys_id(cmd, db);
572         its_encode_virt_id(cmd, desc->its_vmapti_cmd.virt_id);
573
574         its_fixup_cmd(cmd);
575
576         return desc->its_vmapti_cmd.vpe;
577 }
578
579 static struct its_vpe *its_build_vmovi_cmd(struct its_cmd_block *cmd,
580                                            struct its_cmd_desc *desc)
581 {
582         u32 db;
583
584         if (desc->its_vmovi_cmd.db_enabled)
585                 db = desc->its_vmovi_cmd.vpe->vpe_db_lpi;
586         else
587                 db = 1023;
588
589         its_encode_cmd(cmd, GITS_CMD_VMOVI);
590         its_encode_devid(cmd, desc->its_vmovi_cmd.dev->device_id);
591         its_encode_vpeid(cmd, desc->its_vmovi_cmd.vpe->vpe_id);
592         its_encode_event_id(cmd, desc->its_vmovi_cmd.event_id);
593         its_encode_db_phys_id(cmd, db);
594         its_encode_db_valid(cmd, true);
595
596         its_fixup_cmd(cmd);
597
598         return desc->its_vmovi_cmd.vpe;
599 }
600
601 static struct its_vpe *its_build_vmovp_cmd(struct its_cmd_block *cmd,
602                                            struct its_cmd_desc *desc)
603 {
604         its_encode_cmd(cmd, GITS_CMD_VMOVP);
605         its_encode_seq_num(cmd, desc->its_vmovp_cmd.seq_num);
606         its_encode_its_list(cmd, desc->its_vmovp_cmd.its_list);
607         its_encode_vpeid(cmd, desc->its_vmovp_cmd.vpe->vpe_id);
608         its_encode_target(cmd, desc->its_vmovp_cmd.col->target_address);
609
610         its_fixup_cmd(cmd);
611
612         return desc->its_vmovp_cmd.vpe;
613 }
614
615 static u64 its_cmd_ptr_to_offset(struct its_node *its,
616                                  struct its_cmd_block *ptr)
617 {
618         return (ptr - its->cmd_base) * sizeof(*ptr);
619 }
620
621 static int its_queue_full(struct its_node *its)
622 {
623         int widx;
624         int ridx;
625
626         widx = its->cmd_write - its->cmd_base;
627         ridx = readl_relaxed(its->base + GITS_CREADR) / sizeof(struct its_cmd_block);
628
629         /* This is incredibly unlikely to happen, unless the ITS locks up. */
630         if (((widx + 1) % ITS_CMD_QUEUE_NR_ENTRIES) == ridx)
631                 return 1;
632
633         return 0;
634 }
635
636 static struct its_cmd_block *its_allocate_entry(struct its_node *its)
637 {
638         struct its_cmd_block *cmd;
639         u32 count = 1000000;    /* 1s! */
640
641         while (its_queue_full(its)) {
642                 count--;
643                 if (!count) {
644                         pr_err_ratelimited("ITS queue not draining\n");
645                         return NULL;
646                 }
647                 cpu_relax();
648                 udelay(1);
649         }
650
651         cmd = its->cmd_write++;
652
653         /* Handle queue wrapping */
654         if (its->cmd_write == (its->cmd_base + ITS_CMD_QUEUE_NR_ENTRIES))
655                 its->cmd_write = its->cmd_base;
656
657         /* Clear command  */
658         cmd->raw_cmd[0] = 0;
659         cmd->raw_cmd[1] = 0;
660         cmd->raw_cmd[2] = 0;
661         cmd->raw_cmd[3] = 0;
662
663         return cmd;
664 }
665
666 static struct its_cmd_block *its_post_commands(struct its_node *its)
667 {
668         u64 wr = its_cmd_ptr_to_offset(its, its->cmd_write);
669
670         writel_relaxed(wr, its->base + GITS_CWRITER);
671
672         return its->cmd_write;
673 }
674
675 static void its_flush_cmd(struct its_node *its, struct its_cmd_block *cmd)
676 {
677         /*
678          * Make sure the commands written to memory are observable by
679          * the ITS.
680          */
681         if (its->flags & ITS_FLAGS_CMDQ_NEEDS_FLUSHING)
682                 gic_flush_dcache_to_poc(cmd, sizeof(*cmd));
683         else
684                 dsb(ishst);
685 }
686
687 static void its_wait_for_range_completion(struct its_node *its,
688                                           struct its_cmd_block *from,
689                                           struct its_cmd_block *to)
690 {
691         u64 rd_idx, from_idx, to_idx;
692         u32 count = 1000000;    /* 1s! */
693
694         from_idx = its_cmd_ptr_to_offset(its, from);
695         to_idx = its_cmd_ptr_to_offset(its, to);
696
697         while (1) {
698                 rd_idx = readl_relaxed(its->base + GITS_CREADR);
699
700                 /* Direct case */
701                 if (from_idx < to_idx && rd_idx >= to_idx)
702                         break;
703
704                 /* Wrapped case */
705                 if (from_idx >= to_idx && rd_idx >= to_idx && rd_idx < from_idx)
706                         break;
707
708                 count--;
709                 if (!count) {
710                         pr_err_ratelimited("ITS queue timeout\n");
711                         return;
712                 }
713                 cpu_relax();
714                 udelay(1);
715         }
716 }
717
718 /* Warning, macro hell follows */
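/*
 * Each instantiation generates a function that takes the command queue
 * lock, queues the command produced by 'builder', chases it with a
 * SYNC/VSYNC targeting whatever object the builder returned, kicks
 * GITS_CWRITER and then spins until the ITS has consumed the lot.
 */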
719 #define BUILD_SINGLE_CMD_FUNC(name, buildtype, synctype, buildfn)       \
720 void name(struct its_node *its,                                         \
721           buildtype builder,                                            \
722           struct its_cmd_desc *desc)                                    \
723 {                                                                       \
724         struct its_cmd_block *cmd, *sync_cmd, *next_cmd;                \
725         synctype *sync_obj;                                             \
726         unsigned long flags;                                            \
727                                                                         \
728         raw_spin_lock_irqsave(&its->lock, flags);                       \
729                                                                         \
730         cmd = its_allocate_entry(its);                                  \
731         if (!cmd) {             /* We're soooooo screwed... */          \
732                 raw_spin_unlock_irqrestore(&its->lock, flags);          \
733                 return;                                                 \
734         }                                                               \
735         sync_obj = builder(cmd, desc);                                  \
736         its_flush_cmd(its, cmd);                                        \
737                                                                         \
738         if (sync_obj) {                                                 \
739                 sync_cmd = its_allocate_entry(its);                     \
740                 if (!sync_cmd)                                          \
741                         goto post;                                      \
742                                                                         \
743                 buildfn(sync_cmd, sync_obj);                            \
744                 its_flush_cmd(its, sync_cmd);                           \
745         }                                                               \
746                                                                         \
747 post:                                                                   \
748         next_cmd = its_post_commands(its);                              \
749         raw_spin_unlock_irqrestore(&its->lock, flags);                  \
750                                                                         \
751         its_wait_for_range_completion(its, cmd, next_cmd);              \
752 }
753
754 static void its_build_sync_cmd(struct its_cmd_block *sync_cmd,
755                                struct its_collection *sync_col)
756 {
757         its_encode_cmd(sync_cmd, GITS_CMD_SYNC);
758         its_encode_target(sync_cmd, sync_col->target_address);
759
760         its_fixup_cmd(sync_cmd);
761 }
762
763 static BUILD_SINGLE_CMD_FUNC(its_send_single_command, its_cmd_builder_t,
764                              struct its_collection, its_build_sync_cmd)
765
766 static void its_build_vsync_cmd(struct its_cmd_block *sync_cmd,
767                                 struct its_vpe *sync_vpe)
768 {
769         its_encode_cmd(sync_cmd, GITS_CMD_VSYNC);
770         its_encode_vpeid(sync_cmd, sync_vpe->vpe_id);
771
772         its_fixup_cmd(sync_cmd);
773 }
774
775 static BUILD_SINGLE_CMD_FUNC(its_send_single_vcommand, its_cmd_vbuilder_t,
776                              struct its_vpe, its_build_vsync_cmd)
777
778 static void its_send_int(struct its_device *dev, u32 event_id)
779 {
780         struct its_cmd_desc desc;
781
782         desc.its_int_cmd.dev = dev;
783         desc.its_int_cmd.event_id = event_id;
784
785         its_send_single_command(dev->its, its_build_int_cmd, &desc);
786 }
787
788 static void its_send_clear(struct its_device *dev, u32 event_id)
789 {
790         struct its_cmd_desc desc;
791
792         desc.its_clear_cmd.dev = dev;
793         desc.its_clear_cmd.event_id = event_id;
794
795         its_send_single_command(dev->its, its_build_clear_cmd, &desc);
796 }
797
798 static void its_send_inv(struct its_device *dev, u32 event_id)
799 {
800         struct its_cmd_desc desc;
801
802         desc.its_inv_cmd.dev = dev;
803         desc.its_inv_cmd.event_id = event_id;
804
805         its_send_single_command(dev->its, its_build_inv_cmd, &desc);
806 }
807
808 static void its_send_mapd(struct its_device *dev, int valid)
809 {
810         struct its_cmd_desc desc;
811
812         desc.its_mapd_cmd.dev = dev;
813         desc.its_mapd_cmd.valid = !!valid;
814
815         its_send_single_command(dev->its, its_build_mapd_cmd, &desc);
816 }
817
818 static void its_send_mapc(struct its_node *its, struct its_collection *col,
819                           int valid)
820 {
821         struct its_cmd_desc desc;
822
823         desc.its_mapc_cmd.col = col;
824         desc.its_mapc_cmd.valid = !!valid;
825
826         its_send_single_command(its, its_build_mapc_cmd, &desc);
827 }
828
829 static void its_send_mapti(struct its_device *dev, u32 irq_id, u32 id)
830 {
831         struct its_cmd_desc desc;
832
833         desc.its_mapti_cmd.dev = dev;
834         desc.its_mapti_cmd.phys_id = irq_id;
835         desc.its_mapti_cmd.event_id = id;
836
837         its_send_single_command(dev->its, its_build_mapti_cmd, &desc);
838 }
839
840 static void its_send_movi(struct its_device *dev,
841                           struct its_collection *col, u32 id)
842 {
843         struct its_cmd_desc desc;
844
845         desc.its_movi_cmd.dev = dev;
846         desc.its_movi_cmd.col = col;
847         desc.its_movi_cmd.event_id = id;
848
849         its_send_single_command(dev->its, its_build_movi_cmd, &desc);
850 }
851
852 static void its_send_discard(struct its_device *dev, u32 id)
853 {
854         struct its_cmd_desc desc;
855
856         desc.its_discard_cmd.dev = dev;
857         desc.its_discard_cmd.event_id = id;
858
859         its_send_single_command(dev->its, its_build_discard_cmd, &desc);
860 }
861
862 static void its_send_invall(struct its_node *its, struct its_collection *col)
863 {
864         struct its_cmd_desc desc;
865
866         desc.its_invall_cmd.col = col;
867
868         its_send_single_command(its, its_build_invall_cmd, &desc);
869 }
870
871 static void its_send_vmapti(struct its_device *dev, u32 id)
872 {
873         struct its_vlpi_map *map = &dev->event_map.vlpi_maps[id];
874         struct its_cmd_desc desc;
875
876         desc.its_vmapti_cmd.vpe = map->vpe;
877         desc.its_vmapti_cmd.dev = dev;
878         desc.its_vmapti_cmd.virt_id = map->vintid;
879         desc.its_vmapti_cmd.event_id = id;
880         desc.its_vmapti_cmd.db_enabled = map->db_enabled;
881
882         its_send_single_vcommand(dev->its, its_build_vmapti_cmd, &desc);
883 }
884
885 static void its_send_vmovi(struct its_device *dev, u32 id)
886 {
887         struct its_vlpi_map *map = &dev->event_map.vlpi_maps[id];
888         struct its_cmd_desc desc;
889
890         desc.its_vmovi_cmd.vpe = map->vpe;
891         desc.its_vmovi_cmd.dev = dev;
892         desc.its_vmovi_cmd.event_id = id;
893         desc.its_vmovi_cmd.db_enabled = map->db_enabled;
894
895         its_send_single_vcommand(dev->its, its_build_vmovi_cmd, &desc);
896 }
897
898 static void its_send_vmapp(struct its_vpe *vpe, bool valid)
899 {
900         struct its_cmd_desc desc;
901         struct its_node *its;
902
903         desc.its_vmapp_cmd.vpe = vpe;
904         desc.its_vmapp_cmd.valid = valid;
905
906         list_for_each_entry(its, &its_nodes, entry) {
907                 if (!its->is_v4)
908                         continue;
909
910                 desc.its_vmapp_cmd.col = &its->collections[vpe->col_idx];
911                 its_send_single_vcommand(its, its_build_vmapp_cmd, &desc);
912         }
913 }
914
915 static void its_send_vmovp(struct its_vpe *vpe)
916 {
917         struct its_cmd_desc desc;
918         struct its_node *its;
919         unsigned long flags;
920         int col_id = vpe->col_idx;
921
922         desc.its_vmovp_cmd.vpe = vpe;
923         desc.its_vmovp_cmd.its_list = (u16)its_list_map;
924
925         if (!its_list_map) {
926                 its = list_first_entry(&its_nodes, struct its_node, entry);
927                 desc.its_vmovp_cmd.seq_num = 0;
928                 desc.its_vmovp_cmd.col = &its->collections[col_id];
929                 its_send_single_vcommand(its, its_build_vmovp_cmd, &desc);
930                 return;
931         }
932
933         /*
934          * Yet another marvel of the architecture. If using the
935          * its_list "feature", we need to make sure that all ITSs
936          * receive all VMOVP commands in the same order. The only way
937          * to guarantee this is to make vmovp a serialization point.
938          *
939          * Wall <-- Head.
940          */
941         raw_spin_lock_irqsave(&vmovp_lock, flags);
942
943         desc.its_vmovp_cmd.seq_num = vmovp_seq_num++;
944
945         /* Emit VMOVPs */
946         list_for_each_entry(its, &its_nodes, entry) {
947                 if (!its->is_v4)
948                         continue;
949
950                 desc.its_vmovp_cmd.col = &its->collections[col_id];
951                 its_send_single_vcommand(its, its_build_vmovp_cmd, &desc);
952         }
953
954         raw_spin_unlock_irqrestore(&vmovp_lock, flags);
955 }
956
957 static void its_send_vinvall(struct its_vpe *vpe)
958 {
959         struct its_cmd_desc desc;
960         struct its_node *its;
961
962         desc.its_vinvall_cmd.vpe = vpe;
963
964         list_for_each_entry(its, &its_nodes, entry) {
965                 if (!its->is_v4)
966                         continue;
967                 its_send_single_vcommand(its, its_build_vinvall_cmd, &desc);
968         }
969 }
970
971 /*
972  * irqchip functions - assumes MSI, mostly.
973  */
974
975 static inline u32 its_get_event_id(struct irq_data *d)
976 {
977         struct its_device *its_dev = irq_data_get_irq_chip_data(d);
978         return d->hwirq - its_dev->event_map.lpi_base;
979 }
980
981 static void lpi_write_config(struct irq_data *d, u8 clr, u8 set)
982 {
983         irq_hw_number_t hwirq;
984         struct page *prop_page;
985         u8 *cfg;
986
987         if (irqd_is_forwarded_to_vcpu(d)) {
988                 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
989                 u32 event = its_get_event_id(d);
990
991                 prop_page = its_dev->event_map.vm->vprop_page;
992                 hwirq = its_dev->event_map.vlpi_maps[event].vintid;
993         } else {
994                 prop_page = gic_rdists->prop_page;
995                 hwirq = d->hwirq;
996         }
997
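        /* LPI INTIDs start at 8192; the property table is indexed from 0 */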
998         cfg = page_address(prop_page) + hwirq - 8192;
999         *cfg &= ~clr;
1000         *cfg |= set | LPI_PROP_GROUP1;
1001
1002         /*
1003          * Make the above write visible to the redistributors.
1004          * And yes, we're flushing exactly: One. Single. Byte.
1005          * Humpf...
1006          */
1007         if (gic_rdists->flags & RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING)
1008                 gic_flush_dcache_to_poc(cfg, sizeof(*cfg));
1009         else
1010                 dsb(ishst);
1011 }
1012
1013 static void lpi_update_config(struct irq_data *d, u8 clr, u8 set)
1014 {
1015         struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1016
1017         lpi_write_config(d, clr, set);
1018         its_send_inv(its_dev, its_get_event_id(d));
1019 }
1020
1021 static void its_vlpi_set_doorbell(struct irq_data *d, bool enable)
1022 {
1023         struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1024         u32 event = its_get_event_id(d);
1025
1026         if (its_dev->event_map.vlpi_maps[event].db_enabled == enable)
1027                 return;
1028
1029         its_dev->event_map.vlpi_maps[event].db_enabled = enable;
1030
1031         /*
1032          * More fun with the architecture:
1033          *
1034          * Ideally, we'd issue a VMAPTI to set the doorbell to its LPI
1035          * value or to 1023, depending on the enable bit. But that
1036          * would be issuing a mapping for an /existing/ DevID+EventID
1037          * pair, which is UNPREDICTABLE. Instead, let's issue a VMOVI
1038          * to the /same/ vPE, using this opportunity to adjust the
1039          * doorbell. Mouahahahaha. We loves it, Precious.
1040          */
1041         its_send_vmovi(its_dev, event);
1042 }
1043
1044 static void its_mask_irq(struct irq_data *d)
1045 {
1046         if (irqd_is_forwarded_to_vcpu(d))
1047                 its_vlpi_set_doorbell(d, false);
1048
1049         lpi_update_config(d, LPI_PROP_ENABLED, 0);
1050 }
1051
1052 static void its_unmask_irq(struct irq_data *d)
1053 {
1054         if (irqd_is_forwarded_to_vcpu(d))
1055                 its_vlpi_set_doorbell(d, true);
1056
1057         lpi_update_config(d, 0, LPI_PROP_ENABLED);
1058 }
1059
1060 static int its_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
1061                             bool force)
1062 {
1063         unsigned int cpu;
1064         const struct cpumask *cpu_mask = cpu_online_mask;
1065         struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1066         struct its_collection *target_col;
1067         u32 id = its_get_event_id(d);
1068
1069         /* A forwarded interrupt should use irq_set_vcpu_affinity */
1070         if (irqd_is_forwarded_to_vcpu(d))
1071                 return -EINVAL;
1072
1073         /* An LPI cannot be routed to a redistributor that is on a foreign node */
1074         if (its_dev->its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144) {
1075                 if (its_dev->its->numa_node >= 0) {
1076                         cpu_mask = cpumask_of_node(its_dev->its->numa_node);
1077                         if (!cpumask_intersects(mask_val, cpu_mask))
1078                                 return -EINVAL;
1079                 }
1080         }
1081
1082         cpu = cpumask_any_and(mask_val, cpu_mask);
1083
1084         if (cpu >= nr_cpu_ids)
1085                 return -EINVAL;
1086
1087         /* don't set the affinity when the target CPU is the same as the current one */
1088         if (cpu != its_dev->event_map.col_map[id]) {
1089                 target_col = &its_dev->its->collections[cpu];
1090                 its_send_movi(its_dev, target_col, id);
1091                 its_dev->event_map.col_map[id] = cpu;
1092                 irq_data_update_effective_affinity(d, cpumask_of(cpu));
1093         }
1094
1095         return IRQ_SET_MASK_OK_DONE;
1096 }
1097
1098 static void its_irq_compose_msi_msg(struct irq_data *d, struct msi_msg *msg)
1099 {
1100         struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1101         struct its_node *its;
1102         u64 addr;
1103
1104         its = its_dev->its;
1105         addr = its->phys_base + GITS_TRANSLATER;
1106
1107         msg->address_lo         = lower_32_bits(addr);
1108         msg->address_hi         = upper_32_bits(addr);
1109         msg->data               = its_get_event_id(d);
1110
1111         iommu_dma_map_msi_msg(d->irq, msg);
1112 }
1113
1114 static int its_irq_set_irqchip_state(struct irq_data *d,
1115                                      enum irqchip_irq_state which,
1116                                      bool state)
1117 {
1118         struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1119         u32 event = its_get_event_id(d);
1120
1121         if (which != IRQCHIP_STATE_PENDING)
1122                 return -EINVAL;
1123
1124         if (state)
1125                 its_send_int(its_dev, event);
1126         else
1127                 its_send_clear(its_dev, event);
1128
1129         return 0;
1130 }
1131
1132 static int its_vlpi_map(struct irq_data *d, struct its_cmd_info *info)
1133 {
1134         struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1135         u32 event = its_get_event_id(d);
1136         int ret = 0;
1137
1138         if (!info->map)
1139                 return -EINVAL;
1140
1141         mutex_lock(&its_dev->event_map.vlpi_lock);
1142
1143         if (!its_dev->event_map.vm) {
1144                 struct its_vlpi_map *maps;
1145
1146                 maps = kzalloc(sizeof(*maps) * its_dev->event_map.nr_lpis,
1147                                GFP_KERNEL);
1148                 if (!maps) {
1149                         ret = -ENOMEM;
1150                         goto out;
1151                 }
1152
1153                 its_dev->event_map.vm = info->map->vm;
1154                 its_dev->event_map.vlpi_maps = maps;
1155         } else if (its_dev->event_map.vm != info->map->vm) {
1156                 ret = -EINVAL;
1157                 goto out;
1158         }
1159
1160         /* Get our private copy of the mapping information */
1161         its_dev->event_map.vlpi_maps[event] = *info->map;
1162
1163         if (irqd_is_forwarded_to_vcpu(d)) {
1164                 /* Already mapped, move it around */
1165                 its_send_vmovi(its_dev, event);
1166         } else {
1167                 /* Drop the physical mapping */
1168                 its_send_discard(its_dev, event);
1169
1170                 /* and install the virtual one */
1171                 its_send_vmapti(its_dev, event);
1172                 irqd_set_forwarded_to_vcpu(d);
1173
1174                 /* Increment the number of VLPIs */
1175                 its_dev->event_map.nr_vlpis++;
1176         }
1177
1178 out:
1179         mutex_unlock(&its_dev->event_map.vlpi_lock);
1180         return ret;
1181 }
1182
1183 static int its_vlpi_get(struct irq_data *d, struct its_cmd_info *info)
1184 {
1185         struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1186         u32 event = its_get_event_id(d);
1187         int ret = 0;
1188
1189         mutex_lock(&its_dev->event_map.vlpi_lock);
1190
1191         if (!its_dev->event_map.vm ||
1192             !its_dev->event_map.vlpi_maps[event].vm) {
1193                 ret = -EINVAL;
1194                 goto out;
1195         }
1196
1197         /* Copy our mapping information to the incoming request */
1198         *info->map = its_dev->event_map.vlpi_maps[event];
1199
1200 out:
1201         mutex_unlock(&its_dev->event_map.vlpi_lock);
1202         return ret;
1203 }
1204
1205 static int its_vlpi_unmap(struct irq_data *d)
1206 {
1207         struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1208         u32 event = its_get_event_id(d);
1209         int ret = 0;
1210
1211         mutex_lock(&its_dev->event_map.vlpi_lock);
1212
1213         if (!its_dev->event_map.vm || !irqd_is_forwarded_to_vcpu(d)) {
1214                 ret = -EINVAL;
1215                 goto out;
1216         }
1217
1218         /* Drop the virtual mapping */
1219         its_send_discard(its_dev, event);
1220
1221         /* and restore the physical one */
1222         irqd_clr_forwarded_to_vcpu(d);
1223         its_send_mapti(its_dev, d->hwirq, event);
1224         lpi_update_config(d, 0xff, (LPI_PROP_DEFAULT_PRIO |
1225                                     LPI_PROP_ENABLED |
1226                                     LPI_PROP_GROUP1));
1227
1228         /*
1229          * Drop the refcount and make the device available again if
1230          * this was the last VLPI.
1231          */
1232         if (!--its_dev->event_map.nr_vlpis) {
1233                 its_dev->event_map.vm = NULL;
1234                 kfree(its_dev->event_map.vlpi_maps);
1235         }
1236
1237 out:
1238         mutex_unlock(&its_dev->event_map.vlpi_lock);
1239         return ret;
1240 }
1241
1242 static int its_vlpi_prop_update(struct irq_data *d, struct its_cmd_info *info)
1243 {
1244         struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1245
1246         if (!its_dev->event_map.vm || !irqd_is_forwarded_to_vcpu(d))
1247                 return -EINVAL;
1248
1249         if (info->cmd_type == PROP_UPDATE_AND_INV_VLPI)
1250                 lpi_update_config(d, 0xff, info->config);
1251         else
1252                 lpi_write_config(d, 0xff, info->config);
1253         its_vlpi_set_doorbell(d, !!(info->config & LPI_PROP_ENABLED));
1254
1255         return 0;
1256 }
1257
1258 static int its_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu_info)
1259 {
1260         struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1261         struct its_cmd_info *info = vcpu_info;
1262
1263         /* Need a v4 ITS */
1264         if (!its_dev->its->is_v4)
1265                 return -EINVAL;
1266
1267         /* Unmap request? */
1268         if (!info)
1269                 return its_vlpi_unmap(d);
1270
1271         switch (info->cmd_type) {
1272         case MAP_VLPI:
1273                 return its_vlpi_map(d, info);
1274
1275         case GET_VLPI:
1276                 return its_vlpi_get(d, info);
1277
1278         case PROP_UPDATE_VLPI:
1279         case PROP_UPDATE_AND_INV_VLPI:
1280                 return its_vlpi_prop_update(d, info);
1281
1282         default:
1283                 return -EINVAL;
1284         }
1285 }
1286
1287 static struct irq_chip its_irq_chip = {
1288         .name                   = "ITS",
1289         .irq_mask               = its_mask_irq,
1290         .irq_unmask             = its_unmask_irq,
1291         .irq_eoi                = irq_chip_eoi_parent,
1292         .irq_set_affinity       = its_set_affinity,
1293         .irq_compose_msi_msg    = its_irq_compose_msi_msg,
1294         .irq_set_irqchip_state  = its_irq_set_irqchip_state,
1295         .irq_set_vcpu_affinity  = its_irq_set_vcpu_affinity,
1296 };
1297
1298 /*
1299  * How we allocate LPIs:
1300  *
1301  * The GIC has id_bits bits for interrupt identifiers. From there, we
1302  * must subtract 8192 which are reserved for SGIs/PPIs/SPIs. Then, as
1303  * we allocate LPIs by chunks of 32, we can shift the whole thing by 5
1304  * bits to the right.
1305  *
1306  * This gives us (((1UL << id_bits) - 8192) >> 5) possible allocations.
1307  */
1308 #define IRQS_PER_CHUNK_SHIFT    5
1309 #define IRQS_PER_CHUNK          (1 << IRQS_PER_CHUNK_SHIFT)
1310 #define ITS_MAX_LPI_NRBITS      16 /* 64K LPIs */
1311
1312 static unsigned long *lpi_bitmap;
1313 static u32 lpi_chunks;
1314 static DEFINE_SPINLOCK(lpi_lock);
1315
1316 static int its_lpi_to_chunk(int lpi)
1317 {
1318         return (lpi - 8192) >> IRQS_PER_CHUNK_SHIFT;
1319 }
1320
1321 static int its_chunk_to_lpi(int chunk)
1322 {
1323         return (chunk << IRQS_PER_CHUNK_SHIFT) + 8192;
1324 }
1325
1326 static int __init its_lpi_init(u32 id_bits)
1327 {
1328         lpi_chunks = its_lpi_to_chunk(1UL << id_bits);
1329
1330         lpi_bitmap = kzalloc(BITS_TO_LONGS(lpi_chunks) * sizeof(long),
1331                              GFP_KERNEL);
1332         if (!lpi_bitmap) {
1333                 lpi_chunks = 0;
1334                 return -ENOMEM;
1335         }
1336
1337         pr_info("ITS: Allocated %d chunks for LPIs\n", (int)lpi_chunks);
1338         return 0;
1339 }
1340
1341 static unsigned long *its_lpi_alloc_chunks(int nr_irqs, int *base, int *nr_ids)
1342 {
1343         unsigned long *bitmap = NULL;
1344         int chunk_id;
1345         int nr_chunks;
1346         int i;
1347
1348         nr_chunks = DIV_ROUND_UP(nr_irqs, IRQS_PER_CHUNK);
1349
1350         spin_lock(&lpi_lock);
1351
1352         do {
1353                 chunk_id = bitmap_find_next_zero_area(lpi_bitmap, lpi_chunks,
1354                                                       0, nr_chunks, 0);
1355                 if (chunk_id < lpi_chunks)
1356                         break;
1357
1358                 nr_chunks--;
1359         } while (nr_chunks > 0);
1360
1361         if (!nr_chunks)
1362                 goto out;
1363
1364         bitmap = kzalloc(BITS_TO_LONGS(nr_chunks * IRQS_PER_CHUNK) * sizeof (long),
1365                          GFP_ATOMIC);
1366         if (!bitmap)
1367                 goto out;
1368
1369         for (i = 0; i < nr_chunks; i++)
1370                 set_bit(chunk_id + i, lpi_bitmap);
1371
1372         *base = its_chunk_to_lpi(chunk_id);
1373         *nr_ids = nr_chunks * IRQS_PER_CHUNK;
1374
1375 out:
1376         spin_unlock(&lpi_lock);
1377
1378         if (!bitmap)
1379                 *base = *nr_ids = 0;
1380
1381         return bitmap;
1382 }
1383
1384 static void its_lpi_free_chunks(unsigned long *bitmap, int base, int nr_ids)
1385 {
1386         int lpi;
1387
1388         spin_lock(&lpi_lock);
1389
1390         for (lpi = base; lpi < (base + nr_ids); lpi += IRQS_PER_CHUNK) {
1391                 int chunk = its_lpi_to_chunk(lpi);
1392
1393                 BUG_ON(chunk > lpi_chunks);
1394                 if (test_bit(chunk, lpi_bitmap)) {
1395                         clear_bit(chunk, lpi_bitmap);
1396                 } else {
1397                         pr_err("Bad LPI chunk %d\n", chunk);
1398                 }
1399         }
1400
1401         spin_unlock(&lpi_lock);
1402
1403         kfree(bitmap);
1404 }
1405
1406 static struct page *its_allocate_prop_table(gfp_t gfp_flags)
1407 {
1408         struct page *prop_page;
1409
1410         prop_page = alloc_pages(gfp_flags, get_order(LPI_PROPBASE_SZ));
1411         if (!prop_page)
1412                 return NULL;
1413
1414         /* Priority 0xa0, Group-1, disabled */
1415         memset(page_address(prop_page),
1416                LPI_PROP_DEFAULT_PRIO | LPI_PROP_GROUP1,
1417                LPI_PROPBASE_SZ);
1418
1419         /* Make sure the GIC will observe the written configuration */
1420         gic_flush_dcache_to_poc(page_address(prop_page), LPI_PROPBASE_SZ);
1421
1422         return prop_page;
1423 }
1424
1425 static void its_free_prop_table(struct page *prop_page)
1426 {
1427         free_pages((unsigned long)page_address(prop_page),
1428                    get_order(LPI_PROPBASE_SZ));
1429 }
1430
1431 static int __init its_alloc_lpi_tables(void)
1432 {
1433         phys_addr_t paddr;
1434
1435         lpi_id_bits = min_t(u32, gic_rdists->id_bits, ITS_MAX_LPI_NRBITS);
1436         gic_rdists->prop_page = its_allocate_prop_table(GFP_NOWAIT);
1437         if (!gic_rdists->prop_page) {
1438                 pr_err("Failed to allocate PROPBASE\n");
1439                 return -ENOMEM;
1440         }
1441
1442         paddr = page_to_phys(gic_rdists->prop_page);
1443         pr_info("GIC: using LPI property table @%pa\n", &paddr);
1444
1445         return its_lpi_init(lpi_id_bits);
1446 }
1447
1448 static const char *its_base_type_string[] = {
1449         [GITS_BASER_TYPE_DEVICE]        = "Devices",
1450         [GITS_BASER_TYPE_VCPU]          = "Virtual CPUs",
1451         [GITS_BASER_TYPE_RESERVED3]     = "Reserved (3)",
1452         [GITS_BASER_TYPE_COLLECTION]    = "Interrupt Collections",
1453         [GITS_BASER_TYPE_RESERVED5]     = "Reserved (5)",
1454         [GITS_BASER_TYPE_RESERVED6]     = "Reserved (6)",
1455         [GITS_BASER_TYPE_RESERVED7]     = "Reserved (7)",
1456 };
1457
1458 static u64 its_read_baser(struct its_node *its, struct its_baser *baser)
1459 {
1460         u32 idx = baser - its->tables;
1461
1462         return gits_read_baser(its->base + GITS_BASER + (idx << 3));
1463 }
1464
1465 static void its_write_baser(struct its_node *its, struct its_baser *baser,
1466                             u64 val)
1467 {
1468         u32 idx = baser - its->tables;
1469
1470         gits_write_baser(val, its->base + GITS_BASER + (idx << 3));
1471         baser->val = its_read_baser(its, baser);
1472 }
1473
1474 static int its_setup_baser(struct its_node *its, struct its_baser *baser,
1475                            u64 cache, u64 shr, u32 psz, u32 order,
1476                            bool indirect)
1477 {
1478         u64 val = its_read_baser(its, baser);
1479         u64 esz = GITS_BASER_ENTRY_SIZE(val);
1480         u64 type = GITS_BASER_TYPE(val);
1481         u32 alloc_pages;
1482         void *base;
1483         u64 tmp;
1484
1485 retry_alloc_baser:
1486         alloc_pages = (PAGE_ORDER_TO_SIZE(order) / psz);
1487         if (alloc_pages > GITS_BASER_PAGES_MAX) {
1488                 pr_warn("ITS@%pa: %s too large, reduce ITS pages %u->%u\n",
1489                         &its->phys_base, its_base_type_string[type],
1490                         alloc_pages, GITS_BASER_PAGES_MAX);
1491                 alloc_pages = GITS_BASER_PAGES_MAX;
1492                 order = get_order(GITS_BASER_PAGES_MAX * psz);
1493         }
1494
1495         base = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
1496         if (!base)
1497                 return -ENOMEM;
1498
1499 retry_baser:
1500         val = (virt_to_phys(base)                                |
1501                 (type << GITS_BASER_TYPE_SHIFT)                  |
1502                 ((esz - 1) << GITS_BASER_ENTRY_SIZE_SHIFT)       |
1503                 ((alloc_pages - 1) << GITS_BASER_PAGES_SHIFT)    |
1504                 cache                                            |
1505                 shr                                              |
1506                 GITS_BASER_VALID);
1507
1508         val |=  indirect ? GITS_BASER_INDIRECT : 0x0;
1509
1510         switch (psz) {
1511         case SZ_4K:
1512                 val |= GITS_BASER_PAGE_SIZE_4K;
1513                 break;
1514         case SZ_16K:
1515                 val |= GITS_BASER_PAGE_SIZE_16K;
1516                 break;
1517         case SZ_64K:
1518                 val |= GITS_BASER_PAGE_SIZE_64K;
1519                 break;
1520         }
1521
1522         its_write_baser(its, baser, val);
1523         tmp = baser->val;
1524
1525         if ((val ^ tmp) & GITS_BASER_SHAREABILITY_MASK) {
1526                 /*
1527                  * Shareability didn't stick. Just use
1528                  * whatever the read reported, which is likely
1529                  * to be the only thing this ITS
1530                  * supports. If that's zero, make it
1531                  * non-cacheable as well.
1532                  */
1533                 shr = tmp & GITS_BASER_SHAREABILITY_MASK;
1534                 if (!shr) {
1535                         cache = GITS_BASER_nC;
1536                         gic_flush_dcache_to_poc(base, PAGE_ORDER_TO_SIZE(order));
1537                 }
1538                 goto retry_baser;
1539         }
1540
1541         if ((val ^ tmp) & GITS_BASER_PAGE_SIZE_MASK) {
1542                 /*
1543                  * Page size didn't stick. Let's try a smaller
1544                  * size and retry. If we reach 4K, then
1545                  * something is horribly wrong...
1546                  */
1547                 free_pages((unsigned long)base, order);
1548                 baser->base = NULL;
1549
1550                 switch (psz) {
1551                 case SZ_16K:
1552                         psz = SZ_4K;
1553                         goto retry_alloc_baser;
1554                 case SZ_64K:
1555                         psz = SZ_16K;
1556                         goto retry_alloc_baser;
1557                 }
1558         }
1559
1560         if (val != tmp) {
1561                 pr_err("ITS@%pa: %s doesn't stick: %llx %llx\n",
1562                        &its->phys_base, its_base_type_string[type],
1563                        val, tmp);
1564                 free_pages((unsigned long)base, order);
1565                 return -ENXIO;
1566         }
1567
1568         baser->order = order;
1569         baser->base = base;
1570         baser->psz = psz;
1571         tmp = indirect ? GITS_LVL1_ENTRY_SIZE : esz;
1572
1573         pr_info("ITS@%pa: allocated %d %s @%lx (%s, esz %d, psz %dK, shr %d)\n",
1574                 &its->phys_base, (int)(PAGE_ORDER_TO_SIZE(order) / (int)tmp),
1575                 its_base_type_string[type],
1576                 (unsigned long)virt_to_phys(base),
1577                 indirect ? "indirect" : "flat", (int)esz,
1578                 psz / SZ_1K, (int)shr >> GITS_BASER_SHAREABILITY_SHIFT);
1579
1580         return 0;
1581 }
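/*
 * A minimal sketch of the register value composed above, assuming a
 * hypothetical Device table at PA 0x10000000 with esz = 8, a single
 * 64K page, RaWaWb/InnerShareable attributes and 64K ITS pages:
 *
 *   val = 0x10000000
 *       | (GITS_BASER_TYPE_DEVICE << GITS_BASER_TYPE_SHIFT)
 *       | ((8 - 1) << GITS_BASER_ENTRY_SIZE_SHIFT)
 *       | ((1 - 1) << GITS_BASER_PAGES_SHIFT)
 *       | GITS_BASER_RaWaWb | GITS_BASER_InnerShareable
 *       | GITS_BASER_PAGE_SIZE_64K | GITS_BASER_VALID;
 *
 * If the read-back differs in the shareability field, we downgrade to
 * GITS_BASER_nC and flush the table by hand; if it differs in the
 * page-size field, the pages are freed and the allocation is retried
 * with the next smaller ITS page size (64K -> 16K -> 4K).
 */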
1582
1583 static bool its_parse_indirect_baser(struct its_node *its,
1584                                      struct its_baser *baser,
1585                                      u32 psz, u32 *order)
1586 {
1587         u64 tmp = its_read_baser(its, baser);
1588         u64 type = GITS_BASER_TYPE(tmp);
1589         u64 esz = GITS_BASER_ENTRY_SIZE(tmp);
1590         u64 val = GITS_BASER_InnerShareable | GITS_BASER_RaWaWb;
1591         u32 ids = its->device_ids;
1592         u32 new_order = *order;
1593         bool indirect = false;
1594
1595         /* No need to enable indirection if the memory requirement is < (psz * 2) bytes */
1596         if ((esz << ids) > (psz * 2)) {
1597                 /*
1598                  * Find out whether hw supports a single or two-level table
1599                  * by reading back bit 62 (GITS_BASER_INDIRECT) after writing '1' to it.
1600                  */
1601                 its_write_baser(its, baser, val | GITS_BASER_INDIRECT);
1602                 indirect = !!(baser->val & GITS_BASER_INDIRECT);
1603
1604                 if (indirect) {
1605                         /*
1606                          * Each lvl2 table is one ITS page ('psz') in size.
1607                          * To size the lvl1 table, take 'ids' (as reported
1608                          * by the ITS hardware), subtract the ID bits that a
1609                          * single lvl2 table resolves, and multiply the
1610                          * remaining entry count by the lvl1 entry size.
1611                          */
1612                         ids -= ilog2(psz / (int)esz);
1613                         esz = GITS_LVL1_ENTRY_SIZE;
1614                 }
1615         }
1616
1617         /*
1618          * Allocate as many entries as required to fit the
1619          * range of device IDs that the ITS can grok... The ID
1620          * space being incredibly sparse, this results in a
1621          * massive waste of memory if the two-level device
1622          * table feature is not supported by the hardware.
1623          */
1624         new_order = max_t(u32, get_order(esz << ids), new_order);
1625         if (new_order >= MAX_ORDER) {
1626                 new_order = MAX_ORDER - 1;
1627                 ids = ilog2(PAGE_ORDER_TO_SIZE(new_order) / (int)esz);
1628                 pr_warn("ITS@%pa: %s Table too large, reduce ids %u->%u\n",
1629                         &its->phys_base, its_base_type_string[type],
1630                         its->device_ids, ids);
1631         }
1632
1633         *order = new_order;
1634
1635         return indirect;
1636 }
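/*
 * A worked example of the sizing above, with assumed (not mandated)
 * values esz = 8 bytes, ids = 20 DevID bits and psz = SZ_64K:
 *
 *   flat table:  8 << 20           = 8MB  (> psz * 2, so probe indirect)
 *   lvl2 page:   SZ_64K / 8        = 8192 IDs, i.e. 13 bits per lvl2 table
 *   lvl1 table:  (20 - 13) ID bits -> 128 entries * 8-byte lvl1 entries
 *                                   = 1KB
 *
 * so get_order() is taken over 1KB instead of 8MB whenever the ITS
 * accepts GITS_BASER_INDIRECT.
 */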
1637
1638 static void its_free_tables(struct its_node *its)
1639 {
1640         int i;
1641
1642         for (i = 0; i < GITS_BASER_NR_REGS; i++) {
1643                 if (its->tables[i].base) {
1644                         free_pages((unsigned long)its->tables[i].base,
1645                                    its->tables[i].order);
1646                         its->tables[i].base = NULL;
1647                 }
1648         }
1649 }
1650
1651 static int its_alloc_tables(struct its_node *its)
1652 {
1653         u64 typer = gic_read_typer(its->base + GITS_TYPER);
1654         u32 ids = GITS_TYPER_DEVBITS(typer);
1655         u64 shr = GITS_BASER_InnerShareable;
1656         u64 cache = GITS_BASER_RaWaWb;
1657         u32 psz = SZ_64K;
1658         int err, i;
1659
1660         if (its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_22375) {
1661                 /*
1662                  * erratum 22375: only alloc 8MB table size
1663                  * erratum 24313: ignore memory access type
1664                  */
1665                 cache   = GITS_BASER_nCnB;
1666                 ids     = 0x14;                 /* 20 bits, 8MB */
1667         }
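        /*
         * Illustrative arithmetic for the quirk above, assuming 8-byte
         * Device table entries: 2^20 DevIDs * 8 bytes = 8MB, which is
         * where the "20 bits, 8MB" figure comes from.
         */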
1668
1669         its->device_ids = ids;
1670
1671         for (i = 0; i < GITS_BASER_NR_REGS; i++) {
1672                 struct its_baser *baser = its->tables + i;
1673                 u64 val = its_read_baser(its, baser);
1674                 u64 type = GITS_BASER_TYPE(val);
1675                 u32 order = get_order(psz);
1676                 bool indirect = false;
1677
1678                 switch (type) {
1679                 case GITS_BASER_TYPE_NONE:
1680                         continue;
1681
1682                 case GITS_BASER_TYPE_DEVICE:
1683                 case GITS_BASER_TYPE_VCPU:
1684                         indirect = its_parse_indirect_baser(its, baser,
1685                                                             psz, &order);
1686                         break;
1687                 }
1688
1689                 err = its_setup_baser(its, baser, cache, shr, psz, order, indirect);
1690                 if (err < 0) {
1691                         its_free_tables(its);
1692                         return err;
1693                 }
1694
1695                 /* Update settings which will be used for next BASERn */
1696                 psz = baser->psz;
1697                 cache = baser->val & GITS_BASER_CACHEABILITY_MASK;
1698                 shr = baser->val & GITS_BASER_SHAREABILITY_MASK;
1699         }
1700
1701         return 0;
1702 }
1703
1704 static int its_alloc_collections(struct its_node *its)
1705 {
1706         its->collections = kzalloc(nr_cpu_ids * sizeof(*its->collections),
1707                                    GFP_KERNEL);
1708         if (!its->collections)
1709                 return -ENOMEM;
1710
1711         return 0;
1712 }
1713
1714 static struct page *its_allocate_pending_table(gfp_t gfp_flags)
1715 {
1716         struct page *pend_page;
1717         /*
1718          * The pending pages have to be at least 64kB aligned,
1719          * hence the 'max(LPI_PENDBASE_SZ, SZ_64K)' below.
1720          */
1721         pend_page = alloc_pages(gfp_flags | __GFP_ZERO,
1722                                 get_order(max_t(u32, LPI_PENDBASE_SZ, SZ_64K)));
1723         if (!pend_page)
1724                 return NULL;
1725
1726         /* Make sure the GIC will observe the zeroed page */
1727         gic_flush_dcache_to_poc(page_address(pend_page), LPI_PENDBASE_SZ);
1728
1729         return pend_page;
1730 }
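/*
 * A worked example of the allocation above, assuming lpi_id_bits = 16
 * and 4K kernel pages:
 *
 *   LPI_PENDBASE_SZ = ALIGN(BIT(16) / 8, SZ_64K) = ALIGN(8K, 64K) = 64K
 *   get_order(max(64K, 64K))                     = order 4, i.e. 16 pages
 *
 * alloc_pages() returns blocks that are naturally aligned to their own
 * size, which is what provides the required 64kB alignment here.
 */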
1731
1732 static void its_free_pending_table(struct page *pt)
1733 {
1734         free_pages((unsigned long)page_address(pt),
1735                    get_order(max_t(u32, LPI_PENDBASE_SZ, SZ_64K)));
1736 }
1737
1738 static void its_cpu_init_lpis(void)
1739 {
1740         void __iomem *rbase = gic_data_rdist_rd_base();
1741         struct page *pend_page;
1742         u64 val, tmp;
1743
1744         /* If we didn't allocate the pending table yet, do it now */
1745         pend_page = gic_data_rdist()->pend_page;
1746         if (!pend_page) {
1747                 phys_addr_t paddr;
1748
1749                 pend_page = its_allocate_pending_table(GFP_NOWAIT);
1750                 if (!pend_page) {
1751                         pr_err("Failed to allocate PENDBASE for CPU%d\n",
1752                                smp_processor_id());
1753                         return;
1754                 }
1755
1756                 paddr = page_to_phys(pend_page);
1757                 pr_info("CPU%d: using LPI pending table @%pa\n",
1758                         smp_processor_id(), &paddr);
1759                 gic_data_rdist()->pend_page = pend_page;
1760         }
1761
1762         /* Disable LPIs */
1763         val = readl_relaxed(rbase + GICR_CTLR);
1764         val &= ~GICR_CTLR_ENABLE_LPIS;
1765         writel_relaxed(val, rbase + GICR_CTLR);
1766
1767         /*
1768          * Make sure any change to the table is observable by the GIC.
1769          */
1770         dsb(sy);
1771
1772         /* set PROPBASE */
1773         val = (page_to_phys(gic_rdists->prop_page) |
1774                GICR_PROPBASER_InnerShareable |
1775                GICR_PROPBASER_RaWaWb |
1776                ((LPI_NRBITS - 1) & GICR_PROPBASER_IDBITS_MASK));
1777
1778         gicr_write_propbaser(val, rbase + GICR_PROPBASER);
1779         tmp = gicr_read_propbaser(rbase + GICR_PROPBASER);
1780
1781         if ((tmp ^ val) & GICR_PROPBASER_SHAREABILITY_MASK) {
1782                 if (!(tmp & GICR_PROPBASER_SHAREABILITY_MASK)) {
1783                         /*
1784                          * The HW reports non-shareable; we must
1785                          * remove the cacheability attributes as
1786                          * well.
1787                          */
1788                         val &= ~(GICR_PROPBASER_SHAREABILITY_MASK |
1789                                  GICR_PROPBASER_CACHEABILITY_MASK);
1790                         val |= GICR_PROPBASER_nC;
1791                         gicr_write_propbaser(val, rbase + GICR_PROPBASER);
1792                 }
1793                 pr_info_once("GIC: using cache flushing for LPI property table\n");
1794                 gic_rdists->flags |= RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING;
1795         }
1796
1797         /* set PENDBASE */
1798         val = (page_to_phys(pend_page) |
1799                GICR_PENDBASER_InnerShareable |
1800                GICR_PENDBASER_RaWaWb);
1801
1802         gicr_write_pendbaser(val, rbase + GICR_PENDBASER);
1803         tmp = gicr_read_pendbaser(rbase + GICR_PENDBASER);
1804
1805         if (!(tmp & GICR_PENDBASER_SHAREABILITY_MASK)) {
1806                 /*
1807                  * The HW reports non-shareable; we must remove the
1808                  * cacheability attributes as well.
1809                  */
1810                 val &= ~(GICR_PENDBASER_SHAREABILITY_MASK |
1811                          GICR_PENDBASER_CACHEABILITY_MASK);
1812                 val |= GICR_PENDBASER_nC;
1813                 gicr_write_pendbaser(val, rbase + GICR_PENDBASER);
1814         }
1815
1816         /* Enable LPIs */
1817         val = readl_relaxed(rbase + GICR_CTLR);
1818         val |= GICR_CTLR_ENABLE_LPIS;
1819         writel_relaxed(val, rbase + GICR_CTLR);
1820
1821         /* Make sure the GIC has seen the above */
1822         dsb(sy);
1823 }
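/*
 * An illustrative reading of the PROPBASER/PENDBASER programming above,
 * assuming lpi_id_bits = 16:
 *
 *   IDbits field = 16 - 1 = 15  -> INTIDs up to 2^16 - 1, i.e. LPIs
 *                                  8192..65535
 *   PROPBASER    = prop table PA | InnerShareable | RaWaWb | 15
 *   PENDBASER    = this CPU's pending table PA | InnerShareable | RaWaWb
 *
 * As with GITS_BASER, a read-back that drops the shareability field
 * makes us fall back to non-cacheable attributes and explicit flushes
 * (RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING).
 */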
1824
1825 static void its_cpu_init_collection(void)
1826 {
1827         struct its_node *its;
1828         int cpu;
1829
1830         spin_lock(&its_lock);
1831         cpu = smp_processor_id();
1832
1833         list_for_each_entry(its, &its_nodes, entry) {
1834                 u64 target;
1835
1836                 /* Avoid cross-node collections and their mappings */
1837                 if (its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144) {
1838                         struct device_node *cpu_node;
1839
1840                         cpu_node = of_get_cpu_node(cpu, NULL);
1841                         if (its->numa_node != NUMA_NO_NODE &&
1842                                 its->numa_node != of_node_to_nid(cpu_node))
1843                                 continue;
1844                 }
1845
1846                 /*
1847                  * We now have to bind each collection to its target
1848                  * redistributor.
1849                  */
1850                 if (gic_read_typer(its->base + GITS_TYPER) & GITS_TYPER_PTA) {
1851                         /*
1852                          * This ITS wants the physical address of the
1853                          * redistributor.
1854                          */
1855                         target = gic_data_rdist()->phys_base;
1856                 } else {
1857                         /*
1858                          * This ITS wants a linear CPU number.
1859                          */
1860                         target = gic_read_typer(gic_data_rdist_rd_base() + GICR_TYPER);
1861                         target = GICR_TYPER_CPU_NUMBER(target) << 16;
1862                 }
1863
1864                 /* Perform collection mapping */
1865                 its->collections[cpu].target_address = target;
1866                 its->collections[cpu].col_id = cpu;
1867
1868                 its_send_mapc(its, &its->collections[cpu], 1);
1869                 its_send_invall(its, &its->collections[cpu]);
1870         }
1871
1872         spin_unlock(&its_lock);
1873 }
1874
1875 static struct its_device *its_find_device(struct its_node *its, u32 dev_id)
1876 {
1877         struct its_device *its_dev = NULL, *tmp;
1878         unsigned long flags;
1879
1880         raw_spin_lock_irqsave(&its->lock, flags);
1881
1882         list_for_each_entry(tmp, &its->its_device_list, entry) {
1883                 if (tmp->device_id == dev_id) {
1884                         its_dev = tmp;
1885                         break;
1886                 }
1887         }
1888
1889         raw_spin_unlock_irqrestore(&its->lock, flags);
1890
1891         return its_dev;
1892 }
1893
1894 static struct its_baser *its_get_baser(struct its_node *its, u32 type)
1895 {
1896         int i;
1897
1898         for (i = 0; i < GITS_BASER_NR_REGS; i++) {
1899                 if (GITS_BASER_TYPE(its->tables[i].val) == type)
1900                         return &its->tables[i];
1901         }
1902
1903         return NULL;
1904 }
1905
1906 static bool its_alloc_table_entry(struct its_baser *baser, u32 id)
1907 {
1908         struct page *page;
1909         u32 esz, idx;
1910         __le64 *table;
1911
1912         /* Don't allow device id that exceeds single, flat table limit */
1913         esz = GITS_BASER_ENTRY_SIZE(baser->val);
1914         if (!(baser->val & GITS_BASER_INDIRECT))
1915                 return (id < (PAGE_ORDER_TO_SIZE(baser->order) / esz));
1916
1917         /* Compute 1st level table index & check if that exceeds table limit */
1918         idx = id >> ilog2(baser->psz / esz);
1919         if (idx >= (PAGE_ORDER_TO_SIZE(baser->order) / GITS_LVL1_ENTRY_SIZE))
1920                 return false;
1921
1922         table = baser->base;
1923
1924         /* Allocate memory for 2nd level table */
1925         if (!table[idx]) {
1926                 page = alloc_pages(GFP_KERNEL | __GFP_ZERO, get_order(baser->psz));
1927                 if (!page)
1928                         return false;
1929
1930                 /* Flush Lvl2 table to PoC if hw doesn't support coherency */
1931                 if (!(baser->val & GITS_BASER_SHAREABILITY_MASK))
1932                         gic_flush_dcache_to_poc(page_address(page), baser->psz);
1933
1934                 table[idx] = cpu_to_le64(page_to_phys(page) | GITS_BASER_VALID);
1935
1936                 /* Flush Lvl1 entry to PoC if hw doesn't support coherency */
1937                 if (!(baser->val & GITS_BASER_SHAREABILITY_MASK))
1938                         gic_flush_dcache_to_poc(table + idx, GITS_LVL1_ENTRY_SIZE);
1939
1940                 /* Ensure updated table contents are visible to ITS hardware */
1941                 dsb(sy);
1942         }
1943
1944         return true;
1945 }
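/*
 * A worked example of the two-level lookup above, with assumed values
 * psz = SZ_64K and esz = 8:
 *
 *   IDs per lvl2 page: SZ_64K / 8 = 8192, i.e. 13 bits
 *   id = 20000         -> idx = 20000 >> 13 = 2
 *
 * so table[2] must point to a valid, zeroed 64K lvl2 page before the
 * ITS is told about DevID (or vPEID) 20000.
 */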
1946
1947 static bool its_alloc_device_table(struct its_node *its, u32 dev_id)
1948 {
1949         struct its_baser *baser;
1950
1951         baser = its_get_baser(its, GITS_BASER_TYPE_DEVICE);
1952
1953         /* Don't allow device id that exceeds ITS hardware limit */
1954         if (!baser)
1955                 return (ilog2(dev_id) < its->device_ids);
1956
1957         return its_alloc_table_entry(baser, dev_id);
1958 }
1959
1960 static bool its_alloc_vpe_table(u32 vpe_id)
1961 {
1962         struct its_node *its;
1963
1964         /*
1965          * Make sure the L2 tables are allocated on *all* v4 ITSs. We
1966          * could try and only do it on ITSs corresponding to devices
1967          * that have interrupts targeted at this VPE, but the
1968          * complexity becomes crazy (and you have tons of memory
1969          * anyway, right?).
1970          */
1971         list_for_each_entry(its, &its_nodes, entry) {
1972                 struct its_baser *baser;
1973
1974                 if (!its->is_v4)
1975                         continue;
1976
1977                 baser = its_get_baser(its, GITS_BASER_TYPE_VCPU);
1978                 if (!baser)
1979                         return false;
1980
1981                 if (!its_alloc_table_entry(baser, vpe_id))
1982                         return false;
1983         }
1984
1985         return true;
1986 }
1987
1988 static struct its_device *its_create_device(struct its_node *its, u32 dev_id,
1989                                             int nvecs, bool alloc_lpis)
1990 {
1991         struct its_device *dev;
1992         unsigned long *lpi_map = NULL;
1993         unsigned long flags;
1994         u16 *col_map = NULL;
1995         void *itt;
1996         int lpi_base;
1997         int nr_lpis;
1998         int nr_ites;
1999         int sz;
2000
2001         if (!its_alloc_device_table(its, dev_id))
2002                 return NULL;
2003
2004         dev = kzalloc(sizeof(*dev), GFP_KERNEL);
2005         /*
2006          * At least one bit of EventID is being used, hence a minimum
2007          * of two entries. No, the architecture doesn't let you
2008          * express an ITT with a single entry.
2009          */
2010         nr_ites = max(2UL, roundup_pow_of_two(nvecs));
2011         sz = nr_ites * its->ite_size;
2012         sz = max(sz, ITS_ITT_ALIGN) + ITS_ITT_ALIGN - 1;
2013         itt = kzalloc(sz, GFP_KERNEL);
2014         if (alloc_lpis) {
2015                 lpi_map = its_lpi_alloc_chunks(nvecs, &lpi_base, &nr_lpis);
2016                 if (lpi_map)
2017                         col_map = kzalloc(sizeof(*col_map) * nr_lpis,
2018                                           GFP_KERNEL);
2019         } else {
2020                 col_map = kzalloc(sizeof(*col_map) * nr_ites, GFP_KERNEL);
2021                 nr_lpis = 0;
2022                 lpi_base = 0;
2023         }
2024
2025         if (!dev || !itt || !col_map || (!lpi_map && alloc_lpis)) {
2026                 kfree(dev);
2027                 kfree(itt);
2028                 kfree(lpi_map);
2029                 kfree(col_map);
2030                 return NULL;
2031         }
2032
2033         gic_flush_dcache_to_poc(itt, sz);
2034
2035         dev->its = its;
2036         dev->itt = itt;
2037         dev->nr_ites = nr_ites;
2038         dev->event_map.lpi_map = lpi_map;
2039         dev->event_map.col_map = col_map;
2040         dev->event_map.lpi_base = lpi_base;
2041         dev->event_map.nr_lpis = nr_lpis;
2042         mutex_init(&dev->event_map.vlpi_lock);
2043         dev->device_id = dev_id;
2044         INIT_LIST_HEAD(&dev->entry);
2045
2046         raw_spin_lock_irqsave(&its->lock, flags);
2047         list_add(&dev->entry, &its->its_device_list);
2048         raw_spin_unlock_irqrestore(&its->lock, flags);
2049
2050         /* Map device to its ITT */
2051         its_send_mapd(dev, 1);
2052
2053         return dev;
2054 }
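/*
 * A sketch of the ITT sizing above for nvecs = 5, assuming a
 * hypothetical its->ite_size of 8 bytes and ITS_ITT_ALIGN of 256:
 *
 *   nr_ites = max(2, roundup_pow_of_two(5)) = 8
 *   sz      = 8 * 8                         = 64
 *   sz      = max(64, 256) + 256 - 1        = 511
 *
 * i.e. we over-allocate so that a 256-byte aligned window of the
 * required size always exists inside the kzalloc()'d buffer, whatever
 * alignment kzalloc() happens to return.
 */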
2055
2056 static void its_free_device(struct its_device *its_dev)
2057 {
2058         unsigned long flags;
2059
2060         raw_spin_lock_irqsave(&its_dev->its->lock, flags);
2061         list_del(&its_dev->entry);
2062         raw_spin_unlock_irqrestore(&its_dev->its->lock, flags);
2063         kfree(its_dev->itt);
2064         kfree(its_dev);
2065 }
2066
2067 static int its_alloc_device_irq(struct its_device *dev, irq_hw_number_t *hwirq)
2068 {
2069         int idx;
2070
2071         idx = find_first_zero_bit(dev->event_map.lpi_map,
2072                                   dev->event_map.nr_lpis);
2073         if (idx == dev->event_map.nr_lpis)
2074                 return -ENOSPC;
2075
2076         *hwirq = dev->event_map.lpi_base + idx;
2077         set_bit(idx, dev->event_map.lpi_map);
2078
2079         return 0;
2080 }
2081
2082 static int its_msi_prepare(struct irq_domain *domain, struct device *dev,
2083                            int nvec, msi_alloc_info_t *info)
2084 {
2085         struct its_node *its;
2086         struct its_device *its_dev;
2087         struct msi_domain_info *msi_info;
2088         u32 dev_id;
2089
2090         /*
2091          * We ignore "dev" entirely, and rely on the dev_id that has
2092          * been passed via the scratchpad. This limits this domain's
2093          * usefulness to upper layers that definitely know that they
2094          * are built on top of the ITS.
2095          */
2096         dev_id = info->scratchpad[0].ul;
2097
2098         msi_info = msi_get_domain_info(domain);
2099         its = msi_info->data;
2100
2101         if (!gic_rdists->has_direct_lpi &&
2102             vpe_proxy.dev &&
2103             vpe_proxy.dev->its == its &&
2104             dev_id == vpe_proxy.dev->device_id) {
2105                 /* Bad luck. Get yourself a better implementation */
2106                 WARN_ONCE(1, "DevId %x clashes with GICv4 VPE proxy device\n",
2107                           dev_id);
2108                 return -EINVAL;
2109         }
2110
2111         its_dev = its_find_device(its, dev_id);
2112         if (its_dev) {
2113                 /*
2114                  * We have already seen this ID, probably through
2115                  * another alias (PCI bridge of some sort). No need to
2116                  * create the device.
2117                  */
2118                 pr_debug("Reusing ITT for devID %x\n", dev_id);
2119                 goto out;
2120         }
2121
2122         its_dev = its_create_device(its, dev_id, nvec, true);
2123         if (!its_dev)
2124                 return -ENOMEM;
2125
2126         pr_debug("ITT %d entries, %d bits\n", nvec, ilog2(nvec));
2127 out:
2128         info->scratchpad[0].ptr = its_dev;
2129         return 0;
2130 }
2131
2132 static struct msi_domain_ops its_msi_domain_ops = {
2133         .msi_prepare    = its_msi_prepare,
2134 };
2135
2136 static int its_irq_gic_domain_alloc(struct irq_domain *domain,
2137                                     unsigned int virq,
2138                                     irq_hw_number_t hwirq)
2139 {
2140         struct irq_fwspec fwspec;
2141
2142         if (irq_domain_get_of_node(domain->parent)) {
2143                 fwspec.fwnode = domain->parent->fwnode;
2144                 fwspec.param_count = 3;
2145                 fwspec.param[0] = GIC_IRQ_TYPE_LPI;
2146                 fwspec.param[1] = hwirq;
2147                 fwspec.param[2] = IRQ_TYPE_EDGE_RISING;
2148         } else if (is_fwnode_irqchip(domain->parent->fwnode)) {
2149                 fwspec.fwnode = domain->parent->fwnode;
2150                 fwspec.param_count = 2;
2151                 fwspec.param[0] = hwirq;
2152                 fwspec.param[1] = IRQ_TYPE_EDGE_RISING;
2153         } else {
2154                 return -EINVAL;
2155         }
2156
2157         return irq_domain_alloc_irqs_parent(domain, virq, 1, &fwspec);
2158 }
2159
2160 static int its_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
2161                                 unsigned int nr_irqs, void *args)
2162 {
2163         msi_alloc_info_t *info = args;
2164         struct its_device *its_dev = info->scratchpad[0].ptr;
2165         irq_hw_number_t hwirq;
2166         int err;
2167         int i;
2168
2169         for (i = 0; i < nr_irqs; i++) {
2170                 err = its_alloc_device_irq(its_dev, &hwirq);
2171                 if (err)
2172                         return err;
2173
2174                 err = its_irq_gic_domain_alloc(domain, virq + i, hwirq);
2175                 if (err)
2176                         return err;
2177
2178                 irq_domain_set_hwirq_and_chip(domain, virq + i,
2179                                               hwirq, &its_irq_chip, its_dev);
2180                 irqd_set_single_target(irq_desc_get_irq_data(irq_to_desc(virq + i)));
2181                 pr_debug("ID:%d pID:%d vID:%d\n",
2182                          (int)(hwirq - its_dev->event_map.lpi_base),
2183                          (int) hwirq, virq + i);
2184         }
2185
2186         return 0;
2187 }
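/*
 * A small, hypothetical example of the ID/pID/vID triple printed above:
 * with an LPI chunk starting at lpi_base = 8192 and the first free bit
 * at idx = 0, the per-device event ID is 0, the physical LPI (pID) is
 * 8192, and vID is whatever Linux virq the caller handed us.
 */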
2188
2189 static void its_irq_domain_activate(struct irq_domain *domain,
2190                                     struct irq_data *d)
2191 {
2192         struct its_device *its_dev = irq_data_get_irq_chip_data(d);
2193         u32 event = its_get_event_id(d);
2194         const struct cpumask *cpu_mask = cpu_online_mask;
2195         int cpu;
2196
2197         /* get the cpu_mask of local node */
2198         if (its_dev->its->numa_node >= 0)
2199                 cpu_mask = cpumask_of_node(its_dev->its->numa_node);
2200
2201         /* Bind the LPI to the first possible CPU */
2202         cpu = cpumask_first(cpu_mask);
2203         its_dev->event_map.col_map[event] = cpu;
2204         irq_data_update_effective_affinity(d, cpumask_of(cpu));
2205
2206         /* Map the GIC IRQ and event to the device */
2207         its_send_mapti(its_dev, d->hwirq, event);
2208 }
2209
2210 static void its_irq_domain_deactivate(struct irq_domain *domain,
2211                                       struct irq_data *d)
2212 {
2213         struct its_device *its_dev = irq_data_get_irq_chip_data(d);
2214         u32 event = its_get_event_id(d);
2215
2216         /* Stop the delivery of interrupts */
2217         its_send_discard(its_dev, event);
2218 }
2219
2220 static void its_irq_domain_free(struct irq_domain *domain, unsigned int virq,
2221                                 unsigned int nr_irqs)
2222 {
2223         struct irq_data *d = irq_domain_get_irq_data(domain, virq);
2224         struct its_device *its_dev = irq_data_get_irq_chip_data(d);
2225         int i;
2226
2227         for (i = 0; i < nr_irqs; i++) {
2228                 struct irq_data *data = irq_domain_get_irq_data(domain,
2229                                                                 virq + i);
2230                 u32 event = its_get_event_id(data);
2231
2232                 /* Mark interrupt index as unused */
2233                 clear_bit(event, its_dev->event_map.lpi_map);
2234
2235                 /* Nuke the entry in the domain */
2236                 irq_domain_reset_irq_data(data);
2237         }
2238
2239         /* If all interrupts have been freed, start mopping the floor */
2240         if (bitmap_empty(its_dev->event_map.lpi_map,
2241                          its_dev->event_map.nr_lpis)) {
2242                 its_lpi_free_chunks(its_dev->event_map.lpi_map,
2243                                     its_dev->event_map.lpi_base,
2244                                     its_dev->event_map.nr_lpis);
2245                 kfree(its_dev->event_map.col_map);
2246
2247                 /* Unmap device/itt */
2248                 its_send_mapd(its_dev, 0);
2249                 its_free_device(its_dev);
2250         }
2251
2252         irq_domain_free_irqs_parent(domain, virq, nr_irqs);
2253 }
2254
2255 static const struct irq_domain_ops its_domain_ops = {
2256         .alloc                  = its_irq_domain_alloc,
2257         .free                   = its_irq_domain_free,
2258         .activate               = its_irq_domain_activate,
2259         .deactivate             = its_irq_domain_deactivate,
2260 };
2261
2262 /*
2263  * This is insane.
2264  *
2265  * If a GICv4 doesn't implement Direct LPIs (which is extremely
2266  * likely), the only way to perform an invalidate is to use a fake
2267  * device to issue an INV command, implying that the LPI has first
2268  * been mapped to some event on that device. Since this is not exactly
2269  * cheap, we try to keep that mapping around as long as possible, and
2270  * only issue an UNMAP if we're short on available slots.
2271  *
2272  * Broken by design(tm).
2273  */
2274 static void its_vpe_db_proxy_unmap_locked(struct its_vpe *vpe)
2275 {
2276         /* Already unmapped? */
2277         if (vpe->vpe_proxy_event == -1)
2278                 return;
2279
2280         its_send_discard(vpe_proxy.dev, vpe->vpe_proxy_event);
2281         vpe_proxy.vpes[vpe->vpe_proxy_event] = NULL;
2282
2283         /*
2284          * We don't track empty slots at all, so let's move the
2285          * next_victim pointer if we can quickly reuse that slot
2286          * instead of nuking an existing entry. Not clear that this is
2287          * always a win though, and this might just generate a ripple
2288          * effect... Let's just hope VPEs don't migrate too often.
2289          */
2290         if (vpe_proxy.vpes[vpe_proxy.next_victim])
2291                 vpe_proxy.next_victim = vpe->vpe_proxy_event;
2292
2293         vpe->vpe_proxy_event = -1;
2294 }
2295
2296 static void its_vpe_db_proxy_unmap(struct its_vpe *vpe)
2297 {
2298         if (!gic_rdists->has_direct_lpi) {
2299                 unsigned long flags;
2300
2301                 raw_spin_lock_irqsave(&vpe_proxy.lock, flags);
2302                 its_vpe_db_proxy_unmap_locked(vpe);
2303                 raw_spin_unlock_irqrestore(&vpe_proxy.lock, flags);
2304         }
2305 }
2306
2307 static void its_vpe_db_proxy_map_locked(struct its_vpe *vpe)
2308 {
2309         /* Already mapped? */
2310         if (vpe->vpe_proxy_event != -1)
2311                 return;
2312
2313         /* This slot was already allocated. Kick the other VPE out. */
2314         if (vpe_proxy.vpes[vpe_proxy.next_victim])
2315                 its_vpe_db_proxy_unmap_locked(vpe_proxy.vpes[vpe_proxy.next_victim]);
2316
2317         /* Map the new VPE instead */
2318         vpe_proxy.vpes[vpe_proxy.next_victim] = vpe;
2319         vpe->vpe_proxy_event = vpe_proxy.next_victim;
2320         vpe_proxy.next_victim = (vpe_proxy.next_victim + 1) % vpe_proxy.dev->nr_ites;
2321
2322         vpe_proxy.dev->event_map.col_map[vpe->vpe_proxy_event] = vpe->col_idx;
2323         its_send_mapti(vpe_proxy.dev, vpe->vpe_db_lpi, vpe->vpe_proxy_event);
2324 }
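/*
 * Illustrative slot behaviour, assuming a proxy device with
 * nr_ites = 4 slots:
 *
 *   next_victim: 0 -> 1 -> 2 -> 3 -> 0 -> ...
 *
 * Mapping a fifth VPE therefore evicts whatever VPE currently occupies
 * slot 0 (one extra DISCARD plus MAPTI), which is why the unmap path
 * tries to park next_victim on a slot it knows has just been freed.
 */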
2325
2326 static void its_vpe_db_proxy_move(struct its_vpe *vpe, int from, int to)
2327 {
2328         unsigned long flags;
2329         struct its_collection *target_col;
2330
2331         if (gic_rdists->has_direct_lpi) {
2332                 void __iomem *rdbase;
2333
2334                 rdbase = per_cpu_ptr(gic_rdists->rdist, from)->rd_base;
2335                 gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_CLRLPIR);
2336                 while (gic_read_lpir(rdbase + GICR_SYNCR) & 1)
2337                         cpu_relax();
2338
2339                 return;
2340         }
2341
2342         raw_spin_lock_irqsave(&vpe_proxy.lock, flags);
2343
2344         its_vpe_db_proxy_map_locked(vpe);
2345
2346         target_col = &vpe_proxy.dev->its->collections[to];
2347         its_send_movi(vpe_proxy.dev, target_col, vpe->vpe_proxy_event);
2348         vpe_proxy.dev->event_map.col_map[vpe->vpe_proxy_event] = to;
2349
2350         raw_spin_unlock_irqrestore(&vpe_proxy.lock, flags);
2351 }
2352
2353 static int its_vpe_set_affinity(struct irq_data *d,
2354                                 const struct cpumask *mask_val,
2355                                 bool force)
2356 {
2357         struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
2358         int cpu = cpumask_first(mask_val);
2359
2360         /*
2361          * Changing affinity is mega expensive, so let's be as lazy as
2362          * we can and only do it if we really have to. Also, if mapped
2363          * into the proxy device, we need to move the doorbell
2364          * interrupt to its new location.
2365          */
2366         if (vpe->col_idx != cpu) {
2367                 int from = vpe->col_idx;
2368
2369                 vpe->col_idx = cpu;
2370                 its_send_vmovp(vpe);
2371                 its_vpe_db_proxy_move(vpe, from, cpu);
2372         }
2373
2374         return IRQ_SET_MASK_OK_DONE;
2375 }
2376
2377 static void its_vpe_schedule(struct its_vpe *vpe)
2378 {
2379         void __iomem *vlpi_base = gic_data_rdist_vlpi_base();
2380         u64 val;
2381
2382         /* Schedule the VPE */
2383         val  = virt_to_phys(page_address(vpe->its_vm->vprop_page)) &
2384                 GENMASK_ULL(51, 12);
2385         val |= (LPI_NRBITS - 1) & GICR_VPROPBASER_IDBITS_MASK;
2386         val |= GICR_VPROPBASER_RaWb;
2387         val |= GICR_VPROPBASER_InnerShareable;
2388         gits_write_vpropbaser(val, vlpi_base + GICR_VPROPBASER);
2389
2390         val  = virt_to_phys(page_address(vpe->vpt_page)) &
2391                 GENMASK_ULL(51, 16);
2392         val |= GICR_VPENDBASER_RaWaWb;
2393         val |= GICR_VPENDBASER_NonShareable;
2394         /*
2395          * There is no good way of finding out if the pending table is
2396          * empty as we can race against the doorbell interrupt very
2397          * easily. So in the end, vpe->pending_last is only an
2398          * indication that the vcpu has something pending, not one
2399          * that the pending table is empty. A good implementation
2400          * would be able to read its coarse map pretty quickly anyway,
2401          * making this a tolerable issue.
2402          */
2403         val |= GICR_VPENDBASER_PendingLast;
2404         val |= vpe->idai ? GICR_VPENDBASER_IDAI : 0;
2405         val |= GICR_VPENDBASER_Valid;
2406         gits_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER);
2407 }
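/*
 * A note on the address masks above: GENMASK_ULL(51, 12) keeps a
 * 4K-aligned property table address, while GENMASK_ULL(51, 16) keeps a
 * 64K-aligned pending table address, which is why the VPT comes from
 * its_allocate_pending_table() and its 64kB alignment guarantee.
 */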
2408
2409 static void its_vpe_deschedule(struct its_vpe *vpe)
2410 {
2411         void __iomem *vlpi_base = gic_data_rdist_vlpi_base();
2412         u32 count = 1000000;    /* 1s! */
2413         bool clean;
2414         u64 val;
2415
2416         /* We're being scheduled out */
2417         val = gits_read_vpendbaser(vlpi_base + GICR_VPENDBASER);
2418         val &= ~GICR_VPENDBASER_Valid;
2419         gits_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER);
2420
2421         do {
2422                 val = gits_read_vpendbaser(vlpi_base + GICR_VPENDBASER);
2423                 clean = !(val & GICR_VPENDBASER_Dirty);
2424                 if (!clean) {
2425                         count--;
2426                         cpu_relax();
2427                         udelay(1);
2428                 }
2429         } while (!clean && count);
2430
2431         if (unlikely(!clean && !count)) {
2432                 pr_err_ratelimited("ITS virtual pending table not cleaning\n");
2433                 vpe->idai = false;
2434                 vpe->pending_last = true;
2435         } else {
2436                 vpe->idai = !!(val & GICR_VPENDBASER_IDAI);
2437                 vpe->pending_last = !!(val & GICR_VPENDBASER_PendingLast);
2438         }
2439 }
2440
2441 static int its_vpe_set_vcpu_affinity(struct irq_data *d, void *vcpu_info)
2442 {
2443         struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
2444         struct its_cmd_info *info = vcpu_info;
2445
2446         switch (info->cmd_type) {
2447         case SCHEDULE_VPE:
2448                 its_vpe_schedule(vpe);
2449                 return 0;
2450
2451         case DESCHEDULE_VPE:
2452                 its_vpe_deschedule(vpe);
2453                 return 0;
2454
2455         case INVALL_VPE:
2456                 its_send_vinvall(vpe);
2457                 return 0;
2458
2459         default:
2460                 return -EINVAL;
2461         }
2462 }
2463
2464 static void its_vpe_send_cmd(struct its_vpe *vpe,
2465                              void (*cmd)(struct its_device *, u32))
2466 {
2467         unsigned long flags;
2468
2469         raw_spin_lock_irqsave(&vpe_proxy.lock, flags);
2470
2471         its_vpe_db_proxy_map_locked(vpe);
2472         cmd(vpe_proxy.dev, vpe->vpe_proxy_event);
2473
2474         raw_spin_unlock_irqrestore(&vpe_proxy.lock, flags);
2475 }
2476
2477 static void its_vpe_send_inv(struct irq_data *d)
2478 {
2479         struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
2480
2481         if (gic_rdists->has_direct_lpi) {
2482                 void __iomem *rdbase;
2483
2484                 rdbase = per_cpu_ptr(gic_rdists->rdist, vpe->col_idx)->rd_base;
2485                 gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_INVLPIR);
2486                 while (gic_read_lpir(rdbase + GICR_SYNCR) & 1)
2487                         cpu_relax();
2488         } else {
2489                 its_vpe_send_cmd(vpe, its_send_inv);
2490         }
2491 }
2492
2493 static void its_vpe_mask_irq(struct irq_data *d)
2494 {
2495         /*
2496          * We need to mask the LPI, which is described by the parent
2497          * irq_data. Instead of calling into the parent (which won't
2498          * exactly do the right thing), let's simply use the
2499          * parent_data pointer. Yes, I'm naughty.
2500          */
2501         lpi_write_config(d->parent_data, LPI_PROP_ENABLED, 0);
2502         its_vpe_send_inv(d);
2503 }
2504
2505 static void its_vpe_unmask_irq(struct irq_data *d)
2506 {
2507         /* Same hack as above... */
2508         lpi_write_config(d->parent_data, 0, LPI_PROP_ENABLED);
2509         its_vpe_send_inv(d);
2510 }
2511
2512 static int its_vpe_set_irqchip_state(struct irq_data *d,
2513                                      enum irqchip_irq_state which,
2514                                      bool state)
2515 {
2516         struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
2517
2518         if (which != IRQCHIP_STATE_PENDING)
2519                 return -EINVAL;
2520
2521         if (gic_rdists->has_direct_lpi) {
2522                 void __iomem *rdbase;
2523
2524                 rdbase = per_cpu_ptr(gic_rdists->rdist, vpe->col_idx)->rd_base;
2525                 if (state) {
2526                         gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_SETLPIR);
2527                 } else {
2528                         gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_CLRLPIR);
2529                         while (gic_read_lpir(rdbase + GICR_SYNCR) & 1)
2530                                 cpu_relax();
2531                 }
2532         } else {
2533                 if (state)
2534                         its_vpe_send_cmd(vpe, its_send_int);
2535                 else
2536                         its_vpe_send_cmd(vpe, its_send_clear);
2537         }
2538
2539         return 0;
2540 }
2541
2542 static struct irq_chip its_vpe_irq_chip = {
2543         .name                   = "GICv4-vpe",
2544         .irq_mask               = its_vpe_mask_irq,
2545         .irq_unmask             = its_vpe_unmask_irq,
2546         .irq_eoi                = irq_chip_eoi_parent,
2547         .irq_set_affinity       = its_vpe_set_affinity,
2548         .irq_set_irqchip_state  = its_vpe_set_irqchip_state,
2549         .irq_set_vcpu_affinity  = its_vpe_set_vcpu_affinity,
2550 };
2551
2552 static int its_vpe_id_alloc(void)
2553 {
2554         return ida_simple_get(&its_vpeid_ida, 0, 1 << 16, GFP_KERNEL);
2555 }
2556
2557 static void its_vpe_id_free(u16 id)
2558 {
2559         ida_simple_remove(&its_vpeid_ida, id);
2560 }
2561
2562 static int its_vpe_init(struct its_vpe *vpe)
2563 {
2564         struct page *vpt_page;
2565         int vpe_id;
2566
2567         /* Allocate vpe_id */
2568         vpe_id = its_vpe_id_alloc();
2569         if (vpe_id < 0)
2570                 return vpe_id;
2571
2572         /* Allocate VPT */
2573         vpt_page = its_allocate_pending_table(GFP_KERNEL);
2574         if (!vpt_page) {
2575                 its_vpe_id_free(vpe_id);
2576                 return -ENOMEM;
2577         }
2578
2579         if (!its_alloc_vpe_table(vpe_id)) {
2580                 its_vpe_id_free(vpe_id);
2581                 its_free_pending_table(vpe->vpt_page);
2582                 return -ENOMEM;
2583         }
2584
2585         vpe->vpe_id = vpe_id;
2586         vpe->vpt_page = vpt_page;
2587         vpe->vpe_proxy_event = -1;
2588
2589         return 0;
2590 }
2591
2592 static void its_vpe_teardown(struct its_vpe *vpe)
2593 {
2594         its_vpe_db_proxy_unmap(vpe);
2595         its_vpe_id_free(vpe->vpe_id);
2596         its_free_pending_table(vpe->vpt_page);
2597 }
2598
2599 static void its_vpe_irq_domain_free(struct irq_domain *domain,
2600                                     unsigned int virq,
2601                                     unsigned int nr_irqs)
2602 {
2603         struct its_vm *vm = domain->host_data;
2604         int i;
2605
2606         irq_domain_free_irqs_parent(domain, virq, nr_irqs);
2607
2608         for (i = 0; i < nr_irqs; i++) {
2609                 struct irq_data *data = irq_domain_get_irq_data(domain,
2610                                                                 virq + i);
2611                 struct its_vpe *vpe = irq_data_get_irq_chip_data(data);
2612
2613                 BUG_ON(vm != vpe->its_vm);
2614
2615                 clear_bit(data->hwirq, vm->db_bitmap);
2616                 its_vpe_teardown(vpe);
2617                 irq_domain_reset_irq_data(data);
2618         }
2619
2620         if (bitmap_empty(vm->db_bitmap, vm->nr_db_lpis)) {
2621                 its_lpi_free_chunks(vm->db_bitmap, vm->db_lpi_base, vm->nr_db_lpis);
2622                 its_free_prop_table(vm->vprop_page);
2623         }
2624 }
2625
2626 static int its_vpe_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
2627                                     unsigned int nr_irqs, void *args)
2628 {
2629         struct its_vm *vm = args;
2630         unsigned long *bitmap;
2631         struct page *vprop_page;
2632         int base, nr_ids, i, err = 0;
2633
2634         BUG_ON(!vm);
2635
2636         bitmap = its_lpi_alloc_chunks(nr_irqs, &base, &nr_ids);
2637         if (!bitmap)
2638                 return -ENOMEM;
2639
2640         if (nr_ids < nr_irqs) {
2641                 its_lpi_free_chunks(bitmap, base, nr_ids);
2642                 return -ENOMEM;
2643         }
2644
2645         vprop_page = its_allocate_prop_table(GFP_KERNEL);
2646         if (!vprop_page) {
2647                 its_lpi_free_chunks(bitmap, base, nr_ids);
2648                 return -ENOMEM;
2649         }
2650
2651         vm->db_bitmap = bitmap;
2652         vm->db_lpi_base = base;
2653         vm->nr_db_lpis = nr_ids;
2654         vm->vprop_page = vprop_page;
2655
2656         for (i = 0; i < nr_irqs; i++) {
2657                 vm->vpes[i]->vpe_db_lpi = base + i;
2658                 err = its_vpe_init(vm->vpes[i]);
2659                 if (err)
2660                         break;
2661                 err = its_irq_gic_domain_alloc(domain, virq + i,
2662                                                vm->vpes[i]->vpe_db_lpi);
2663                 if (err)
2664                         break;
2665                 irq_domain_set_hwirq_and_chip(domain, virq + i, i,
2666                                               &its_vpe_irq_chip, vm->vpes[i]);
2667                 set_bit(i, bitmap);
2668         }
2669
2670         if (err) {
2671                 if (i > 0)
2672                         its_vpe_irq_domain_free(domain, virq, i);
2673
2674                 its_lpi_free_chunks(bitmap, base, nr_ids);
2675                 its_free_prop_table(vprop_page);
2676         }
2677
2678         return err;
2679 }
2680
2681 static void its_vpe_irq_domain_activate(struct irq_domain *domain,
2682                                         struct irq_data *d)
2683 {
2684         struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
2685
2686         /* Map the VPE to the first possible CPU */
2687         vpe->col_idx = cpumask_first(cpu_online_mask);
2688         its_send_vmapp(vpe, true);
2689         its_send_vinvall(vpe);
2690 }
2691
2692 static void its_vpe_irq_domain_deactivate(struct irq_domain *domain,
2693                                           struct irq_data *d)
2694 {
2695         struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
2696
2697         its_send_vmapp(vpe, false);
2698 }
2699
2700 static const struct irq_domain_ops its_vpe_domain_ops = {
2701         .alloc                  = its_vpe_irq_domain_alloc,
2702         .free                   = its_vpe_irq_domain_free,
2703         .activate               = its_vpe_irq_domain_activate,
2704         .deactivate             = its_vpe_irq_domain_deactivate,
2705 };
2706
2707 static int its_force_quiescent(void __iomem *base)
2708 {
2709         u32 count = 1000000;    /* 1s */
2710         u32 val;
2711
2712         val = readl_relaxed(base + GITS_CTLR);
2713         /*
2714          * GIC architecture specification requires the ITS to be both
2715          * disabled and quiescent for writes to GITS_BASER<n> or
2716          * GITS_CBASER to not have UNPREDICTABLE results.
2717          */
2718         if ((val & GITS_CTLR_QUIESCENT) && !(val & GITS_CTLR_ENABLE))
2719                 return 0;
2720
2721         /* Disable the generation of all interrupts to this ITS */
2722         val &= ~(GITS_CTLR_ENABLE | GITS_CTLR_ImDe);
2723         writel_relaxed(val, base + GITS_CTLR);
2724
2725         /* Poll GITS_CTLR and wait until ITS becomes quiescent */
2726         while (1) {
2727                 val = readl_relaxed(base + GITS_CTLR);
2728                 if (val & GITS_CTLR_QUIESCENT)
2729                         return 0;
2730
2731                 count--;
2732                 if (!count)
2733                         return -EBUSY;
2734
2735                 cpu_relax();
2736                 udelay(1);
2737         }
2738 }
2739
2740 static void __maybe_unused its_enable_quirk_cavium_22375(void *data)
2741 {
2742         struct its_node *its = data;
2743
2744         its->flags |= ITS_FLAGS_WORKAROUND_CAVIUM_22375;
2745 }
2746
2747 static void __maybe_unused its_enable_quirk_cavium_23144(void *data)
2748 {
2749         struct its_node *its = data;
2750
2751         its->flags |= ITS_FLAGS_WORKAROUND_CAVIUM_23144;
2752 }
2753
2754 static void __maybe_unused its_enable_quirk_qdf2400_e0065(void *data)
2755 {
2756         struct its_node *its = data;
2757
2758         /* On QDF2400, the size of the ITE is 16 bytes */
2759         its->ite_size = 16;
2760 }
2761
2762 static const struct gic_quirk its_quirks[] = {
2763 #ifdef CONFIG_CAVIUM_ERRATUM_22375
2764         {
2765                 .desc   = "ITS: Cavium errata 22375, 24313",
2766                 .iidr   = 0xa100034c,   /* ThunderX pass 1.x */
2767                 .mask   = 0xffff0fff,
2768                 .init   = its_enable_quirk_cavium_22375,
2769         },
2770 #endif
2771 #ifdef CONFIG_CAVIUM_ERRATUM_23144
2772         {
2773                 .desc   = "ITS: Cavium erratum 23144",
2774                 .iidr   = 0xa100034c,   /* ThunderX pass 1.x */
2775                 .mask   = 0xffff0fff,
2776                 .init   = its_enable_quirk_cavium_23144,
2777         },
2778 #endif
2779 #ifdef CONFIG_QCOM_QDF2400_ERRATUM_0065
2780         {
2781                 .desc   = "ITS: QDF2400 erratum 0065",
2782                 .iidr   = 0x00001070, /* QDF2400 ITS rev 1.x */
2783                 .mask   = 0xffffffff,
2784                 .init   = its_enable_quirk_qdf2400_e0065,
2785         },
2786 #endif
2787         {
2788         }
2789 };
2790
2791 static void its_enable_quirks(struct its_node *its)
2792 {
2793         u32 iidr = readl_relaxed(its->base + GITS_IIDR);
2794
2795         gic_enable_quirks(iidr, its_quirks, its);
2796 }
2797
2798 static int its_init_domain(struct fwnode_handle *handle, struct its_node *its)
2799 {
2800         struct irq_domain *inner_domain;
2801         struct msi_domain_info *info;
2802
2803         info = kzalloc(sizeof(*info), GFP_KERNEL);
2804         if (!info)
2805                 return -ENOMEM;
2806
2807         inner_domain = irq_domain_create_tree(handle, &its_domain_ops, its);
2808         if (!inner_domain) {
2809                 kfree(info);
2810                 return -ENOMEM;
2811         }
2812
2813         inner_domain->parent = its_parent;
2814         irq_domain_update_bus_token(inner_domain, DOMAIN_BUS_NEXUS);
2815         inner_domain->flags |= IRQ_DOMAIN_FLAG_MSI_REMAP;
2816         info->ops = &its_msi_domain_ops;
2817         info->data = its;
2818         inner_domain->host_data = info;
2819
2820         return 0;
2821 }
2822
2823 static int its_init_vpe_domain(void)
2824 {
2825         struct its_node *its;
2826         u32 devid;
2827         int entries;
2828
2829         if (gic_rdists->has_direct_lpi) {
2830                 pr_info("ITS: Using DirectLPI for VPE invalidation\n");
2831                 return 0;
2832         }
2833
2834         /* Any ITS will do, even if not v4 */
2835         its = list_first_entry(&its_nodes, struct its_node, entry);
2836
2837         entries = roundup_pow_of_two(nr_cpu_ids);
2838         vpe_proxy.vpes = kzalloc(sizeof(*vpe_proxy.vpes) * entries,
2839                                  GFP_KERNEL);
2840         if (!vpe_proxy.vpes) {
2841                 pr_err("ITS: Can't allocate GICv4 proxy device array\n");
2842                 return -ENOMEM;
2843         }
2844
2845         /* Use the last possible DevID */
2846         devid = GENMASK(its->device_ids - 1, 0);
2847         vpe_proxy.dev = its_create_device(its, devid, entries, false);
2848         if (!vpe_proxy.dev) {
2849                 kfree(vpe_proxy.vpes);
2850                 pr_err("ITS: Can't allocate GICv4 proxy device\n");
2851                 return -ENOMEM;
2852         }
2853
2854         BUG_ON(entries != vpe_proxy.dev->nr_ites);
2855
2856         raw_spin_lock_init(&vpe_proxy.lock);
2857         vpe_proxy.next_victim = 0;
2858         pr_info("ITS: Allocated DevID %x as GICv4 proxy device (%d slots)\n",
2859                 devid, vpe_proxy.dev->nr_ites);
2860
2861         return 0;
2862 }
2863
2864 static int __init its_compute_its_list_map(struct resource *res,
2865                                            void __iomem *its_base)
2866 {
2867         int its_number;
2868         u32 ctlr;
2869
2870         /*
2871          * This is assumed to be done early enough that we're
2872          * guaranteed to be single-threaded, hence no
2873          * locking. Should this change, we should address
2874          * this.
2875          */
2876         its_number = find_first_zero_bit(&its_list_map, ITS_LIST_MAX);
2877         if (its_number >= ITS_LIST_MAX) {
2878                 pr_err("ITS@%pa: No ITSList entry available!\n",
2879                        &res->start);
2880                 return -EINVAL;
2881         }
2882
2883         ctlr = readl_relaxed(its_base + GITS_CTLR);
2884         ctlr &= ~GITS_CTLR_ITS_NUMBER;
2885         ctlr |= its_number << GITS_CTLR_ITS_NUMBER_SHIFT;
2886         writel_relaxed(ctlr, its_base + GITS_CTLR);
2887         ctlr = readl_relaxed(its_base + GITS_CTLR);
2888         if ((ctlr & GITS_CTLR_ITS_NUMBER) != (its_number << GITS_CTLR_ITS_NUMBER_SHIFT)) {
2889                 its_number = ctlr & GITS_CTLR_ITS_NUMBER;
2890                 its_number >>= GITS_CTLR_ITS_NUMBER_SHIFT;
2891         }
2892
2893         if (test_and_set_bit(its_number, &its_list_map)) {
2894                 pr_err("ITS@%pa: Duplicate ITSList entry %d\n",
2895                        &res->start, its_number);
2896                 return -EINVAL;
2897         }
2898
2899         return its_number;
2900 }
2901
2902 static int __init its_probe_one(struct resource *res,
2903                                 struct fwnode_handle *handle, int numa_node)
2904 {
2905         struct its_node *its;
2906         void __iomem *its_base;
2907         u32 val, ctlr;
2908         u64 baser, tmp, typer;
2909         int err;
2910
2911         its_base = ioremap(res->start, resource_size(res));
2912         if (!its_base) {
2913                 pr_warn("ITS@%pa: Unable to map ITS registers\n", &res->start);
2914                 return -ENOMEM;
2915         }
2916
2917         val = readl_relaxed(its_base + GITS_PIDR2) & GIC_PIDR2_ARCH_MASK;
2918         if (val != 0x30 && val != 0x40) {
2919                 pr_warn("ITS@%pa: No ITS detected, giving up\n", &res->start);
2920                 err = -ENODEV;
2921                 goto out_unmap;
2922         }
2923
2924         err = its_force_quiescent(its_base);
2925         if (err) {
2926                 pr_warn("ITS@%pa: Failed to quiesce, giving up\n", &res->start);
2927                 goto out_unmap;
2928         }
2929
2930         pr_info("ITS %pR\n", res);
2931
2932         its = kzalloc(sizeof(*its), GFP_KERNEL);
2933         if (!its) {
2934                 err = -ENOMEM;
2935                 goto out_unmap;
2936         }
2937
2938         raw_spin_lock_init(&its->lock);
2939         INIT_LIST_HEAD(&its->entry);
2940         INIT_LIST_HEAD(&its->its_device_list);
2941         typer = gic_read_typer(its_base + GITS_TYPER);
2942         its->base = its_base;
2943         its->phys_base = res->start;
2944         its->ite_size = GITS_TYPER_ITT_ENTRY_SIZE(typer);
2945         its->is_v4 = !!(typer & GITS_TYPER_VLPIS);
2946         if (its->is_v4) {
2947                 if (!(typer & GITS_TYPER_VMOVP)) {
2948                         err = its_compute_its_list_map(res, its_base);
2949                         if (err < 0)
2950                                 goto out_free_its;
2951
2952                         pr_info("ITS@%pa: Using ITS number %d\n",
2953                                 &res->start, err);
2954                 } else {
2955                         pr_info("ITS@%pa: Single VMOVP capable\n", &res->start);
2956                 }
2957         }
2958
2959         its->numa_node = numa_node;
2960
2961         its->cmd_base = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
2962                                                 get_order(ITS_CMD_QUEUE_SZ));
2963         if (!its->cmd_base) {
2964                 err = -ENOMEM;
2965                 goto out_free_its;
2966         }
2967         its->cmd_write = its->cmd_base;
2968
2969         its_enable_quirks(its);
2970
2971         err = its_alloc_tables(its);
2972         if (err)
2973                 goto out_free_cmd;
2974
2975         err = its_alloc_collections(its);
2976         if (err)
2977                 goto out_free_tables;
2978
2979         baser = (virt_to_phys(its->cmd_base)    |
2980                  GITS_CBASER_RaWaWb             |
2981                  GITS_CBASER_InnerShareable     |
2982                  (ITS_CMD_QUEUE_SZ / SZ_4K - 1) |
2983                  GITS_CBASER_VALID);
2984
2985         gits_write_cbaser(baser, its->base + GITS_CBASER);
2986         tmp = gits_read_cbaser(its->base + GITS_CBASER);
2987
2988         if ((tmp ^ baser) & GITS_CBASER_SHAREABILITY_MASK) {
2989                 if (!(tmp & GITS_CBASER_SHAREABILITY_MASK)) {
2990                         /*
2991                          * The HW reports non-shareable; we must
2992                          * remove the cacheability attributes as
2993                          * well.
2994                          */
2995                         baser &= ~(GITS_CBASER_SHAREABILITY_MASK |
2996                                    GITS_CBASER_CACHEABILITY_MASK);
2997                         baser |= GITS_CBASER_nC;
2998                         gits_write_cbaser(baser, its->base + GITS_CBASER);
2999                 }
3000                 pr_info("ITS: using cache flushing for cmd queue\n");
3001                 its->flags |= ITS_FLAGS_CMDQ_NEEDS_FLUSHING;
3002         }
3003
3004         gits_write_cwriter(0, its->base + GITS_CWRITER);
3005         ctlr = readl_relaxed(its->base + GITS_CTLR);
3006         ctlr |= GITS_CTLR_ENABLE;
3007         if (its->is_v4)
3008                 ctlr |= GITS_CTLR_ImDe;
3009         writel_relaxed(ctlr, its->base + GITS_CTLR);
3010
3011         err = its_init_domain(handle, its);
3012         if (err)
3013                 goto out_free_tables;
3014
3015         spin_lock(&its_lock);
3016         list_add(&its->entry, &its_nodes);
3017         spin_unlock(&its_lock);
3018
3019         return 0;
3020
3021 out_free_tables:
3022         its_free_tables(its);
3023 out_free_cmd:
3024         free_pages((unsigned long)its->cmd_base, get_order(ITS_CMD_QUEUE_SZ));
3025 out_free_its:
3026         kfree(its);
3027 out_unmap:
3028         iounmap(its_base);
3029         pr_err("ITS@%pa: failed probing (%d)\n", &res->start, err);
3030         return err;
3031 }
3032
3033 static bool gic_rdists_supports_plpis(void)
3034 {
3035         return !!(gic_read_typer(gic_data_rdist_rd_base() + GICR_TYPER) & GICR_TYPER_PLPIS);
3036 }
3037
3038 int its_cpu_init(void)
3039 {
3040         if (!list_empty(&its_nodes)) {
3041                 if (!gic_rdists_supports_plpis()) {
3042                         pr_info("CPU%d: LPIs not supported\n", smp_processor_id());
3043                         return -ENXIO;
3044                 }
3045                 its_cpu_init_lpis();
3046                 its_cpu_init_collection();
3047         }
3048
3049         return 0;
3050 }
3051
3052 static const struct of_device_id its_device_id[] = {
3053         {       .compatible     = "arm,gic-v3-its",     },
3054         {},
3055 };
3056
3057 static int __init its_of_probe(struct device_node *node)
3058 {
3059         struct device_node *np;
3060         struct resource res;
3061
3062         for (np = of_find_matching_node(node, its_device_id); np;
3063              np = of_find_matching_node(np, its_device_id)) {
3064                 if (!of_property_read_bool(np, "msi-controller")) {
3065                         pr_warn("%pOF: no msi-controller property, ITS ignored\n",
3066                                 np);
3067                         continue;
3068                 }
3069
3070                 if (of_address_to_resource(np, 0, &res)) {
3071                         pr_warn("%pOF: no regs?\n", np);
3072                         continue;
3073                 }
3074
3075                 its_probe_one(&res, &np->fwnode, of_node_to_nid(np));
3076         }
3077         return 0;
3078 }
3079
3080 #ifdef CONFIG_ACPI
3081
3082 #define ACPI_GICV3_ITS_MEM_SIZE (SZ_128K)
3083
3084 #ifdef CONFIG_ACPI_NUMA
3085 struct its_srat_map {
3086         /* numa node id */
3087         u32     numa_node;
3088         /* GIC ITS ID */
3089         u32     its_id;
3090 };
3091
3092 static struct its_srat_map *its_srat_maps __initdata;
3093 static int its_in_srat __initdata;
3094
3095 static int __init acpi_get_its_numa_node(u32 its_id)
3096 {
3097         int i;
3098
3099         for (i = 0; i < its_in_srat; i++) {
3100                 if (its_id == its_srat_maps[i].its_id)
3101                         return its_srat_maps[i].numa_node;
3102         }
3103         return NUMA_NO_NODE;
3104 }
3105
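/*
 * Counting-only callback: it records nothing and merely lets
 * acpi_table_parse_entries() report how many SRAT ITS affinity entries
 * exist, so that its_srat_maps can be sized below.
 */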
3106 static int __init gic_acpi_match_srat_its(struct acpi_subtable_header *header,
3107                                           const unsigned long end)
3108 {
3109         return 0;
3110 }
3111
3112 static int __init gic_acpi_parse_srat_its(struct acpi_subtable_header *header,
3113                                           const unsigned long end)
3114 {
3115         int node;
3116         struct acpi_srat_gic_its_affinity *its_affinity;
3117
3118         its_affinity = (struct acpi_srat_gic_its_affinity *)header;
3119         if (!its_affinity)
3120                 return -EINVAL;
3121
3122         if (its_affinity->header.length < sizeof(*its_affinity)) {
3123                 pr_err("SRAT: Invalid header length %d in ITS affinity\n",
3124                         its_affinity->header.length);
3125                 return -EINVAL;
3126         }
3127
3128         node = acpi_map_pxm_to_node(its_affinity->proximity_domain);
3129
3130         if (node == NUMA_NO_NODE || node >= MAX_NUMNODES) {
3131                 pr_err("SRAT: Invalid NUMA node %d in ITS affinity\n", node);
3132                 return 0;
3133         }
3134
3135         its_srat_maps[its_in_srat].numa_node = node;
3136         its_srat_maps[its_in_srat].its_id = its_affinity->its_id;
3137         its_in_srat++;
3138         pr_info("SRAT: PXM %d -> ITS %d -> Node %d\n",
3139                 its_affinity->proximity_domain, its_affinity->its_id, node);
3140
3141         return 0;
3142 }
3143
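/*
 * Two-pass SRAT parse: count the GIC ITS affinity entries, allocate
 * the ITS-to-node map, then parse again to fill it in.
 */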
3144 static void __init acpi_table_parse_srat_its(void)
3145 {
3146         int count;
3147
3148         count = acpi_table_parse_entries(ACPI_SIG_SRAT,
3149                         sizeof(struct acpi_table_srat),
3150                         ACPI_SRAT_TYPE_GIC_ITS_AFFINITY,
3151                         gic_acpi_match_srat_its, 0);
3152         if (count <= 0)
3153                 return;
3154
3155         its_srat_maps = kmalloc_array(count, sizeof(struct its_srat_map),
3156                                       GFP_KERNEL);
3157         if (!its_srat_maps) {
3158                 pr_warn("SRAT: Failed to allocate memory for its_srat_maps!\n");
3159                 return;
3160         }
3161
3162         acpi_table_parse_entries(ACPI_SIG_SRAT,
3163                         sizeof(struct acpi_table_srat),
3164                         ACPI_SRAT_TYPE_GIC_ITS_AFFINITY,
3165                         gic_acpi_parse_srat_its, 0);
3166 }
3167
3168 /* free the its_srat_maps after ITS probing */
3169 static void __init acpi_its_srat_maps_free(void)
3170 {
3171         kfree(its_srat_maps);
3172 }
3173 #else
3174 static void __init acpi_table_parse_srat_its(void)      { }
3175 static int __init acpi_get_its_numa_node(u32 its_id) { return NUMA_NO_NODE; }
3176 static void __init acpi_its_srat_maps_free(void) { }
3177 #endif
3178
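/*
 * Probe one ITS described by an MADT GIC translator entry: build its
 * MMIO resource, allocate an fwnode token and register it with IORT,
 * then hand the ITS over to its_probe_one(), unwinding on failure.
 */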
3179 static int __init gic_acpi_parse_madt_its(struct acpi_subtable_header *header,
3180                                           const unsigned long end)
3181 {
3182         struct acpi_madt_generic_translator *its_entry;
3183         struct fwnode_handle *dom_handle;
3184         struct resource res;
3185         int err;
3186
3187         its_entry = (struct acpi_madt_generic_translator *)header;
3188         memset(&res, 0, sizeof(res));
3189         res.start = its_entry->base_address;
3190         res.end = its_entry->base_address + ACPI_GICV3_ITS_MEM_SIZE - 1;
3191         res.flags = IORESOURCE_MEM;
3192
3193         dom_handle = irq_domain_alloc_fwnode((void *)its_entry->base_address);
3194         if (!dom_handle) {
3195                 pr_err("ITS@%pa: Unable to allocate GICv3 ITS domain token\n",
3196                        &res.start);
3197                 return -ENOMEM;
3198         }
3199
3200         err = iort_register_domain_token(its_entry->translation_id, dom_handle);
3201         if (err) {
3202                 pr_err("ITS@%pa: Unable to register GICv3 ITS domain token (ITS ID %d) to IORT\n",
3203                        &res.start, its_entry->translation_id);
3204                 goto dom_err;
3205         }
3206
3207         err = its_probe_one(&res, dom_handle,
3208                         acpi_get_its_numa_node(its_entry->translation_id));
3209         if (!err)
3210                 return 0;
3211
3212         iort_deregister_domain_token(its_entry->translation_id);
3213 dom_err:
3214         irq_domain_free_fwnode(dom_handle);
3215         return err;
3216 }
3217
3218 static void __init its_acpi_probe(void)
3219 {
3220         acpi_table_parse_srat_its();
3221         acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_TRANSLATOR,
3222                               gic_acpi_parse_madt_its, 0);
3223         acpi_its_srat_maps_free();
3224 }
3225 #else
3226 static void __init its_acpi_probe(void) { }
3227 #endif
3228
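/*
 * Top-level ITS initialisation, called by the GICv3 driver: discover
 * the ITSs through DT or ACPI, allocate the LPI tables, and enable
 * GICv4 VLPI handling when at least one ITS is v4-capable and the
 * redistributors advertise VLPI support.
 */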
3229 int __init its_init(struct fwnode_handle *handle, struct rdists *rdists,
3230                     struct irq_domain *parent_domain)
3231 {
3232         struct device_node *of_node;
3233         struct its_node *its;
3234         bool has_v4 = false;
3235         int err;
3236
3237         its_parent = parent_domain;
3238         of_node = to_of_node(handle);
3239         if (of_node)
3240                 its_of_probe(of_node);
3241         else
3242                 its_acpi_probe();
3243
3244         if (list_empty(&its_nodes)) {
3245                 pr_warn("ITS: No ITS available, not enabling LPIs\n");
3246                 return -ENXIO;
3247         }
3248
3249         gic_rdists = rdists;
3250         err = its_alloc_lpi_tables();
3251         if (err)
3252                 return err;
3253
3254         list_for_each_entry(its, &its_nodes, entry)
3255                 has_v4 |= its->is_v4;
3256
3257         if (has_v4 && rdists->has_vlpis) {
3258                 if (its_init_vpe_domain() ||
3259                     its_init_v4(parent_domain, &its_vpe_domain_ops)) {
3260                         rdists->has_vlpis = false;
3261                         pr_err("ITS: Disabling GICv4 support\n");
3262                 }
3263         }
3264
3265         return 0;
3266 }