/*
 * Copyright (C) 2013-2017 ARM Limited, All Rights Reserved.
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/acpi.h>
#include <linux/acpi_iort.h>
#include <linux/bitmap.h>
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/dma-iommu.h>
#include <linux/interrupt.h>
#include <linux/irqdomain.h>
#include <linux/list.h>
#include <linux/list_sort.h>
#include <linux/log2.h>
#include <linux/mm.h>
#include <linux/msi.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_pci.h>
#include <linux/of_platform.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/syscore_ops.h>

#include <linux/irqchip.h>
#include <linux/irqchip/arm-gic-v3.h>
#include <linux/irqchip/arm-gic-v4.h>

#include <asm/cputype.h>
#include <asm/exception.h>

#include "irq-gic-common.h"

#define ITS_FLAGS_CMDQ_NEEDS_FLUSHING		(1ULL << 0)
#define ITS_FLAGS_WORKAROUND_CAVIUM_22375	(1ULL << 1)
#define ITS_FLAGS_WORKAROUND_CAVIUM_23144	(1ULL << 2)

#define RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING	(1 << 0)

static u32 lpi_id_bits;

/*
 * We allocate memory for PROPBASE to cover 2 ^ lpi_id_bits LPIs to
 * deal with (one configuration byte per interrupt). PENDBASE has to
 * be 64kB aligned (one bit per LPI, plus 8192 bits for SPI/PPI/SGI).
 */
#define LPI_NRBITS		lpi_id_bits
#define LPI_PROPBASE_SZ		ALIGN(BIT(LPI_NRBITS), SZ_64K)
#define LPI_PENDBASE_SZ		ALIGN(BIT(LPI_NRBITS) / 8, SZ_64K)
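
/*
 * Worked example (illustrative): with lpi_id_bits == 16,
 * LPI_PROPBASE_SZ is ALIGN(65536, SZ_64K) == 64kB (one byte per LPI)
 * and LPI_PENDBASE_SZ is ALIGN(65536 / 8, SZ_64K) == 64kB (one bit
 * per LPI, rounded up to the 64kB alignment PENDBASE requires).
 */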

#define LPI_PROP_DEFAULT_PRIO	0xa0

/*
 * Collection structure - just an ID, and a redistributor address to
 * ping. We use one per CPU as a bag of interrupts assigned to this
 * CPU.
 */
struct its_collection {
	u64			target_address;
	u16			col_id;
};

/*
 * The ITS_BASER structure - contains memory information, cached
 * value of BASER register configuration and ITS page size.
 */
struct its_baser {
	void		*base;
	u64		val;
	u32		order;
	u32		psz;
};

struct its_device;

/*
 * The ITS structure - contains most of the infrastructure, with the
 * top-level MSI domain, the command queue, the collections, and the
 * list of devices writing to it.
 *
 * dev_alloc_lock has to be taken for device allocations, while the
 * spinlock must be taken to parse data structures such as the device
 * list.
 */
struct its_node {
	raw_spinlock_t		lock;
	struct mutex		dev_alloc_lock;
	struct list_head	entry;
	void __iomem		*base;
	phys_addr_t		phys_base;
	struct its_cmd_block	*cmd_base;
	struct its_cmd_block	*cmd_write;
	struct its_baser	tables[GITS_BASER_NR_REGS];
	struct its_collection	*collections;
	struct fwnode_handle	*fwnode_handle;
	u64			(*get_msi_base)(struct its_device *its_dev);
	u64			cbaser_save;
	u32			ctlr_save;
	struct list_head	its_device_list;
	u64			flags;
	unsigned long		list_nr;
	u32			ite_size;
	u32			device_ids;
	int			numa_node;
	unsigned int		msi_domain_flags;
	u32			pre_its_base;	/* for Socionext Synquacer */
	bool			is_v4;
	int			vlpi_redist_offset;
};

#define ITS_ITT_ALIGN		SZ_256

/* The maximum number of VPEID bits supported by VLPI commands */
#define ITS_MAX_VPEID_BITS	(16)
#define ITS_MAX_VPEID		(1 << (ITS_MAX_VPEID_BITS))

/* Convert page order to size in bytes */
#define PAGE_ORDER_TO_SIZE(o)	(PAGE_SIZE << (o))

struct event_lpi_map {
	unsigned long		*lpi_map;
	u16			*col_map;
	irq_hw_number_t		lpi_base;
	int			nr_lpis;
	struct mutex		vlpi_lock;
	struct its_vm		*vm;
	struct its_vlpi_map	*vlpi_maps;
	int			nr_vlpis;
};

/*
 * The ITS view of a device - belongs to an ITS, owns an interrupt
 * translation table, and a list of interrupts. If some of its
 * LPIs are injected into a guest (GICv4), the event_map.vm field
 * indicates which one.
 */
struct its_device {
	struct list_head	entry;
	struct its_node		*its;
	struct event_lpi_map	event_map;
	void			*itt;
	u32			nr_ites;
	u32			device_id;
	bool			shared;
};

static struct {
	raw_spinlock_t		lock;
	struct its_device	*dev;
	struct its_vpe		**vpes;
	int			next_victim;
} vpe_proxy;

static LIST_HEAD(its_nodes);
static DEFINE_RAW_SPINLOCK(its_lock);
static struct rdists *gic_rdists;
static struct irq_domain *its_parent;

static unsigned long its_list_map;
static u16 vmovp_seq_num;
static DEFINE_RAW_SPINLOCK(vmovp_lock);

static DEFINE_IDA(its_vpeid_ida);

#define gic_data_rdist()		(raw_cpu_ptr(gic_rdists->rdist))
#define gic_data_rdist_rd_base()	(gic_data_rdist()->rd_base)
#define gic_data_rdist_vlpi_base()	(gic_data_rdist_rd_base() + SZ_128K)
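
/*
 * Note (informational): on GICv4, each redistributor exposes two
 * extra 64kB frames (VLPI_base and a reserved frame) after the usual
 * RD_base + SGI_base pair, which is why the VLPI page sits at a fixed
 * SZ_128K offset from rd_base.
 */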

static u16 get_its_list(struct its_vm *vm)
{
	struct its_node *its;
	unsigned long its_list = 0;

	list_for_each_entry(its, &its_nodes, entry) {
		if (!its->is_v4)
			continue;

		if (vm->vlpi_count[its->list_nr])
			__set_bit(its->list_nr, &its_list);
	}

	return (u16)its_list;
}

static struct its_collection *dev_event_to_col(struct its_device *its_dev,
					       u32 event)
{
	struct its_node *its = its_dev->its;

	return its->collections + its_dev->event_map.col_map[event];
}

static struct its_collection *valid_col(struct its_collection *col)
{
	if (WARN_ON_ONCE(col->target_address & GENMASK_ULL(15, 0)))
		return NULL;

	return col;
}

static struct its_vpe *valid_vpe(struct its_node *its, struct its_vpe *vpe)
{
	if (valid_col(its->collections + vpe->col_idx))
		return vpe;

	return NULL;
}

/*
 * ITS command descriptors - parameters to be encoded in a command
 * block.
 */
struct its_cmd_desc {
	union {
		struct {
			struct its_device *dev;
			u32 event_id;
		} its_inv_cmd;

		struct {
			struct its_device *dev;
			u32 event_id;
		} its_clear_cmd;

		struct {
			struct its_device *dev;
			u32 event_id;
		} its_int_cmd;

		struct {
			struct its_device *dev;
			int valid;
		} its_mapd_cmd;

		struct {
			struct its_collection *col;
			int valid;
		} its_mapc_cmd;

		struct {
			struct its_device *dev;
			u32 phys_id;
			u32 event_id;
		} its_mapti_cmd;

		struct {
			struct its_device *dev;
			struct its_collection *col;
			u32 event_id;
		} its_movi_cmd;

		struct {
			struct its_device *dev;
			u32 event_id;
		} its_discard_cmd;

		struct {
			struct its_collection *col;
		} its_invall_cmd;

		struct {
			struct its_vpe *vpe;
		} its_vinvall_cmd;

		struct {
			struct its_vpe *vpe;
			struct its_collection *col;
			bool valid;
		} its_vmapp_cmd;

		struct {
			struct its_vpe *vpe;
			struct its_device *dev;
			u32 virt_id;
			u32 event_id;
			bool db_enabled;
		} its_vmapti_cmd;

		struct {
			struct its_vpe *vpe;
			struct its_device *dev;
			u32 event_id;
			bool db_enabled;
		} its_vmovi_cmd;

		struct {
			struct its_vpe *vpe;
			struct its_collection *col;
			u16 seq_num;
			u16 its_list;
		} its_vmovp_cmd;
	};
};

/*
 * The ITS command block, which is what the ITS actually parses.
 */
struct its_cmd_block {
	u64	raw_cmd[4];
};

#define ITS_CMD_QUEUE_SZ		SZ_64K
#define ITS_CMD_QUEUE_NR_ENTRIES	(ITS_CMD_QUEUE_SZ / sizeof(struct its_cmd_block))
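
/*
 * With a 64kB queue and 32-byte commands, the ring holds
 * ITS_CMD_QUEUE_NR_ENTRIES == 2048 commands. GITS_CREADR and
 * GITS_CWRITER hold byte offsets into this ring, hence the
 * pointer/offset conversions below.
 */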

typedef struct its_collection *(*its_cmd_builder_t)(struct its_node *,
						    struct its_cmd_block *,
						    struct its_cmd_desc *);

typedef struct its_vpe *(*its_cmd_vbuilder_t)(struct its_node *,
					      struct its_cmd_block *,
					      struct its_cmd_desc *);

static void its_mask_encode(u64 *raw_cmd, u64 val, int h, int l)
{
	u64 mask = GENMASK_ULL(h, l);
	*raw_cmd &= ~mask;
	*raw_cmd |= (val << l) & mask;
}
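
/*
 * Example (illustrative only): placing a device ID in bits [63:32]
 * of the first command word:
 *
 *	u64 raw = 0;
 *	its_mask_encode(&raw, 0x1234, 63, 32);
 *	// raw is now 0x0000123400000000
 */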

static void its_encode_cmd(struct its_cmd_block *cmd, u8 cmd_nr)
{
	its_mask_encode(&cmd->raw_cmd[0], cmd_nr, 7, 0);
}

static void its_encode_devid(struct its_cmd_block *cmd, u32 devid)
{
	its_mask_encode(&cmd->raw_cmd[0], devid, 63, 32);
}

static void its_encode_event_id(struct its_cmd_block *cmd, u32 id)
{
	its_mask_encode(&cmd->raw_cmd[1], id, 31, 0);
}

static void its_encode_phys_id(struct its_cmd_block *cmd, u32 phys_id)
{
	its_mask_encode(&cmd->raw_cmd[1], phys_id, 63, 32);
}

static void its_encode_size(struct its_cmd_block *cmd, u8 size)
{
	its_mask_encode(&cmd->raw_cmd[1], size, 4, 0);
}

static void its_encode_itt(struct its_cmd_block *cmd, u64 itt_addr)
{
	its_mask_encode(&cmd->raw_cmd[2], itt_addr >> 8, 51, 8);
}

static void its_encode_valid(struct its_cmd_block *cmd, int valid)
{
	its_mask_encode(&cmd->raw_cmd[2], !!valid, 63, 63);
}

static void its_encode_target(struct its_cmd_block *cmd, u64 target_addr)
{
	its_mask_encode(&cmd->raw_cmd[2], target_addr >> 16, 51, 16);
}

static void its_encode_collection(struct its_cmd_block *cmd, u16 col)
{
	its_mask_encode(&cmd->raw_cmd[2], col, 15, 0);
}

static void its_encode_vpeid(struct its_cmd_block *cmd, u16 vpeid)
{
	its_mask_encode(&cmd->raw_cmd[1], vpeid, 47, 32);
}

static void its_encode_virt_id(struct its_cmd_block *cmd, u32 virt_id)
{
	its_mask_encode(&cmd->raw_cmd[2], virt_id, 31, 0);
}

static void its_encode_db_phys_id(struct its_cmd_block *cmd, u32 db_phys_id)
{
	its_mask_encode(&cmd->raw_cmd[2], db_phys_id, 63, 32);
}

static void its_encode_db_valid(struct its_cmd_block *cmd, bool db_valid)
{
	its_mask_encode(&cmd->raw_cmd[2], db_valid, 0, 0);
}

static void its_encode_seq_num(struct its_cmd_block *cmd, u16 seq_num)
{
	its_mask_encode(&cmd->raw_cmd[0], seq_num, 47, 32);
}

static void its_encode_its_list(struct its_cmd_block *cmd, u16 its_list)
{
	its_mask_encode(&cmd->raw_cmd[1], its_list, 15, 0);
}

static void its_encode_vpt_addr(struct its_cmd_block *cmd, u64 vpt_pa)
{
	its_mask_encode(&cmd->raw_cmd[3], vpt_pa >> 16, 51, 16);
}

static void its_encode_vpt_size(struct its_cmd_block *cmd, u8 vpt_size)
{
	its_mask_encode(&cmd->raw_cmd[3], vpt_size, 4, 0);
}

static inline void its_fixup_cmd(struct its_cmd_block *cmd)
{
	/* Let's fixup BE commands */
	cmd->raw_cmd[0] = cpu_to_le64(cmd->raw_cmd[0]);
	cmd->raw_cmd[1] = cpu_to_le64(cmd->raw_cmd[1]);
	cmd->raw_cmd[2] = cpu_to_le64(cmd->raw_cmd[2]);
	cmd->raw_cmd[3] = cpu_to_le64(cmd->raw_cmd[3]);
}

static struct its_collection *its_build_mapd_cmd(struct its_node *its,
						 struct its_cmd_block *cmd,
						 struct its_cmd_desc *desc)
{
	unsigned long itt_addr;
	u8 size = ilog2(desc->its_mapd_cmd.dev->nr_ites);

	itt_addr = virt_to_phys(desc->its_mapd_cmd.dev->itt);
	itt_addr = ALIGN(itt_addr, ITS_ITT_ALIGN);

	its_encode_cmd(cmd, GITS_CMD_MAPD);
	its_encode_devid(cmd, desc->its_mapd_cmd.dev->device_id);
	its_encode_size(cmd, size - 1);
	its_encode_itt(cmd, itt_addr);
	its_encode_valid(cmd, desc->its_mapd_cmd.valid);

	its_fixup_cmd(cmd);

	return NULL;
}

static struct its_collection *its_build_mapc_cmd(struct its_node *its,
						 struct its_cmd_block *cmd,
						 struct its_cmd_desc *desc)
{
	its_encode_cmd(cmd, GITS_CMD_MAPC);
	its_encode_collection(cmd, desc->its_mapc_cmd.col->col_id);
	its_encode_target(cmd, desc->its_mapc_cmd.col->target_address);
	its_encode_valid(cmd, desc->its_mapc_cmd.valid);

	its_fixup_cmd(cmd);

	return desc->its_mapc_cmd.col;
}

static struct its_collection *its_build_mapti_cmd(struct its_node *its,
						  struct its_cmd_block *cmd,
						  struct its_cmd_desc *desc)
{
	struct its_collection *col;

	col = dev_event_to_col(desc->its_mapti_cmd.dev,
			       desc->its_mapti_cmd.event_id);

	its_encode_cmd(cmd, GITS_CMD_MAPTI);
	its_encode_devid(cmd, desc->its_mapti_cmd.dev->device_id);
	its_encode_event_id(cmd, desc->its_mapti_cmd.event_id);
	its_encode_phys_id(cmd, desc->its_mapti_cmd.phys_id);
	its_encode_collection(cmd, col->col_id);

	its_fixup_cmd(cmd);

	return valid_col(col);
}

static struct its_collection *its_build_movi_cmd(struct its_node *its,
						 struct its_cmd_block *cmd,
						 struct its_cmd_desc *desc)
{
	struct its_collection *col;

	col = dev_event_to_col(desc->its_movi_cmd.dev,
			       desc->its_movi_cmd.event_id);

	its_encode_cmd(cmd, GITS_CMD_MOVI);
	its_encode_devid(cmd, desc->its_movi_cmd.dev->device_id);
	its_encode_event_id(cmd, desc->its_movi_cmd.event_id);
	its_encode_collection(cmd, desc->its_movi_cmd.col->col_id);

	its_fixup_cmd(cmd);

	return valid_col(col);
}

static struct its_collection *its_build_discard_cmd(struct its_node *its,
						    struct its_cmd_block *cmd,
						    struct its_cmd_desc *desc)
{
	struct its_collection *col;

	col = dev_event_to_col(desc->its_discard_cmd.dev,
			       desc->its_discard_cmd.event_id);

	its_encode_cmd(cmd, GITS_CMD_DISCARD);
	its_encode_devid(cmd, desc->its_discard_cmd.dev->device_id);
	its_encode_event_id(cmd, desc->its_discard_cmd.event_id);

	its_fixup_cmd(cmd);

	return valid_col(col);
}

static struct its_collection *its_build_inv_cmd(struct its_node *its,
						struct its_cmd_block *cmd,
						struct its_cmd_desc *desc)
{
	struct its_collection *col;

	col = dev_event_to_col(desc->its_inv_cmd.dev,
			       desc->its_inv_cmd.event_id);

	its_encode_cmd(cmd, GITS_CMD_INV);
	its_encode_devid(cmd, desc->its_inv_cmd.dev->device_id);
	its_encode_event_id(cmd, desc->its_inv_cmd.event_id);

	its_fixup_cmd(cmd);

	return valid_col(col);
}

static struct its_collection *its_build_int_cmd(struct its_node *its,
						struct its_cmd_block *cmd,
						struct its_cmd_desc *desc)
{
	struct its_collection *col;

	col = dev_event_to_col(desc->its_int_cmd.dev,
			       desc->its_int_cmd.event_id);

	its_encode_cmd(cmd, GITS_CMD_INT);
	its_encode_devid(cmd, desc->its_int_cmd.dev->device_id);
	its_encode_event_id(cmd, desc->its_int_cmd.event_id);

	its_fixup_cmd(cmd);

	return valid_col(col);
}

static struct its_collection *its_build_clear_cmd(struct its_node *its,
						  struct its_cmd_block *cmd,
						  struct its_cmd_desc *desc)
{
	struct its_collection *col;

	col = dev_event_to_col(desc->its_clear_cmd.dev,
			       desc->its_clear_cmd.event_id);

	its_encode_cmd(cmd, GITS_CMD_CLEAR);
	its_encode_devid(cmd, desc->its_clear_cmd.dev->device_id);
	its_encode_event_id(cmd, desc->its_clear_cmd.event_id);

	its_fixup_cmd(cmd);

	return valid_col(col);
}

static struct its_collection *its_build_invall_cmd(struct its_node *its,
						   struct its_cmd_block *cmd,
						   struct its_cmd_desc *desc)
{
	its_encode_cmd(cmd, GITS_CMD_INVALL);
	its_encode_collection(cmd, desc->its_invall_cmd.col->col_id);

	its_fixup_cmd(cmd);

	return desc->its_invall_cmd.col;
}

static struct its_vpe *its_build_vinvall_cmd(struct its_node *its,
					     struct its_cmd_block *cmd,
					     struct its_cmd_desc *desc)
{
	its_encode_cmd(cmd, GITS_CMD_VINVALL);
	its_encode_vpeid(cmd, desc->its_vinvall_cmd.vpe->vpe_id);

	its_fixup_cmd(cmd);

	return valid_vpe(its, desc->its_vinvall_cmd.vpe);
}

static struct its_vpe *its_build_vmapp_cmd(struct its_node *its,
					   struct its_cmd_block *cmd,
					   struct its_cmd_desc *desc)
{
	unsigned long vpt_addr;
	u64 target;

	vpt_addr = virt_to_phys(page_address(desc->its_vmapp_cmd.vpe->vpt_page));
	target = desc->its_vmapp_cmd.col->target_address + its->vlpi_redist_offset;

	its_encode_cmd(cmd, GITS_CMD_VMAPP);
	its_encode_vpeid(cmd, desc->its_vmapp_cmd.vpe->vpe_id);
	its_encode_valid(cmd, desc->its_vmapp_cmd.valid);
	its_encode_target(cmd, target);
	its_encode_vpt_addr(cmd, vpt_addr);
	its_encode_vpt_size(cmd, LPI_NRBITS - 1);

	its_fixup_cmd(cmd);

	return valid_vpe(its, desc->its_vmapp_cmd.vpe);
}

static struct its_vpe *its_build_vmapti_cmd(struct its_node *its,
					    struct its_cmd_block *cmd,
					    struct its_cmd_desc *desc)
{
	u32 db;

	if (desc->its_vmapti_cmd.db_enabled)
		db = desc->its_vmapti_cmd.vpe->vpe_db_lpi;
	else
		db = 1023;

	its_encode_cmd(cmd, GITS_CMD_VMAPTI);
	its_encode_devid(cmd, desc->its_vmapti_cmd.dev->device_id);
	its_encode_vpeid(cmd, desc->its_vmapti_cmd.vpe->vpe_id);
	its_encode_event_id(cmd, desc->its_vmapti_cmd.event_id);
	its_encode_db_phys_id(cmd, db);
	its_encode_virt_id(cmd, desc->its_vmapti_cmd.virt_id);

	its_fixup_cmd(cmd);

	return valid_vpe(its, desc->its_vmapti_cmd.vpe);
}

static struct its_vpe *its_build_vmovi_cmd(struct its_node *its,
					   struct its_cmd_block *cmd,
					   struct its_cmd_desc *desc)
{
	u32 db;

	if (desc->its_vmovi_cmd.db_enabled)
		db = desc->its_vmovi_cmd.vpe->vpe_db_lpi;
	else
		db = 1023;

	its_encode_cmd(cmd, GITS_CMD_VMOVI);
	its_encode_devid(cmd, desc->its_vmovi_cmd.dev->device_id);
	its_encode_vpeid(cmd, desc->its_vmovi_cmd.vpe->vpe_id);
	its_encode_event_id(cmd, desc->its_vmovi_cmd.event_id);
	its_encode_db_phys_id(cmd, db);
	its_encode_db_valid(cmd, true);

	its_fixup_cmd(cmd);

	return valid_vpe(its, desc->its_vmovi_cmd.vpe);
}

static struct its_vpe *its_build_vmovp_cmd(struct its_node *its,
					   struct its_cmd_block *cmd,
					   struct its_cmd_desc *desc)
{
	u64 target;

	target = desc->its_vmovp_cmd.col->target_address + its->vlpi_redist_offset;
	its_encode_cmd(cmd, GITS_CMD_VMOVP);
	its_encode_seq_num(cmd, desc->its_vmovp_cmd.seq_num);
	its_encode_its_list(cmd, desc->its_vmovp_cmd.its_list);
	its_encode_vpeid(cmd, desc->its_vmovp_cmd.vpe->vpe_id);
	its_encode_target(cmd, target);

	its_fixup_cmd(cmd);

	return valid_vpe(its, desc->its_vmovp_cmd.vpe);
}

static u64 its_cmd_ptr_to_offset(struct its_node *its,
				 struct its_cmd_block *ptr)
{
	return (ptr - its->cmd_base) * sizeof(*ptr);
}

static int its_queue_full(struct its_node *its)
{
	int widx;
	int ridx;

	widx = its->cmd_write - its->cmd_base;
	ridx = readl_relaxed(its->base + GITS_CREADR) / sizeof(struct its_cmd_block);

	/* This is incredibly unlikely to happen, unless the ITS locks up. */
	if (((widx + 1) % ITS_CMD_QUEUE_NR_ENTRIES) == ridx)
		return 1;

	return 0;
}

static struct its_cmd_block *its_allocate_entry(struct its_node *its)
{
	struct its_cmd_block *cmd;
	u32 count = 1000000;	/* 1s! */

	while (its_queue_full(its)) {
		count--;
		if (!count) {
			pr_err_ratelimited("ITS queue not draining\n");
			return NULL;
		}
		cpu_relax();
		udelay(1);
	}

	cmd = its->cmd_write++;

	/* Handle queue wrapping */
	if (its->cmd_write == (its->cmd_base + ITS_CMD_QUEUE_NR_ENTRIES))
		its->cmd_write = its->cmd_base;

	/* Clear command */
	cmd->raw_cmd[0] = 0;
	cmd->raw_cmd[1] = 0;
	cmd->raw_cmd[2] = 0;
	cmd->raw_cmd[3] = 0;

	return cmd;
}

static struct its_cmd_block *its_post_commands(struct its_node *its)
{
	u64 wr = its_cmd_ptr_to_offset(its, its->cmd_write);

	writel_relaxed(wr, its->base + GITS_CWRITER);

	return its->cmd_write;
}

static void its_flush_cmd(struct its_node *its, struct its_cmd_block *cmd)
{
	/*
	 * Make sure the commands written to memory are observable by
	 * the ITS.
	 */
	if (its->flags & ITS_FLAGS_CMDQ_NEEDS_FLUSHING)
		gic_flush_dcache_to_poc(cmd, sizeof(*cmd));
	else
		dsb(ishst);
}

static int its_wait_for_range_completion(struct its_node *its,
					 u64 prev_idx,
					 struct its_cmd_block *to)
{
	u64 rd_idx, to_idx, linear_idx;
	u32 count = 1000000;	/* 1s! */

	/* Linearize to_idx if the command set has wrapped around */
	to_idx = its_cmd_ptr_to_offset(its, to);
	if (to_idx < prev_idx)
		to_idx += ITS_CMD_QUEUE_SZ;

	linear_idx = prev_idx;

	while (1) {
		s64 delta;

		rd_idx = readl_relaxed(its->base + GITS_CREADR);

		/*
		 * Compute the read pointer progress, taking the
		 * potential wrap-around into account.
		 */
		delta = rd_idx - prev_idx;
		if (rd_idx < prev_idx)
			delta += ITS_CMD_QUEUE_SZ;

		linear_idx += delta;
		if (linear_idx >= to_idx)
			break;

		count--;
		if (!count) {
			pr_err_ratelimited("ITS queue timeout (%llu %llu)\n",
					   to_idx, linear_idx);
			return -1;
		}
		prev_idx = rd_idx;
		cpu_relax();
		udelay(1);
	}

	return 0;
}
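
/*
 * Illustrative numbers for the wrap-around handling above: with
 * prev_idx == 65504 (near the end of the 64kB ring) and a target
 * command at byte offset 64, to_idx is linearized to 64 + 65536 ==
 * 65600. If GITS_CREADR then reads 96, delta == 96 - 65504 + 65536 ==
 * 128, so linear_idx becomes 65632 >= 65600 and the wait completes.
 */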

/* Warning, macro hell follows */
#define BUILD_SINGLE_CMD_FUNC(name, buildtype, synctype, buildfn)	\
void name(struct its_node *its,						\
	  buildtype builder,						\
	  struct its_cmd_desc *desc)					\
{									\
	struct its_cmd_block *cmd, *sync_cmd, *next_cmd;		\
	synctype *sync_obj;						\
	unsigned long flags;						\
	u64 rd_idx;							\
									\
	raw_spin_lock_irqsave(&its->lock, flags);			\
									\
	cmd = its_allocate_entry(its);					\
	if (!cmd) {		/* We're soooooo screwed... */		\
		raw_spin_unlock_irqrestore(&its->lock, flags);		\
		return;							\
	}								\
	sync_obj = builder(its, cmd, desc);				\
	its_flush_cmd(its, cmd);					\
									\
	if (sync_obj) {							\
		sync_cmd = its_allocate_entry(its);			\
		if (!sync_cmd)						\
			goto post;					\
									\
		buildfn(its, sync_cmd, sync_obj);			\
		its_flush_cmd(its, sync_cmd);				\
	}								\
									\
post:									\
	rd_idx = readl_relaxed(its->base + GITS_CREADR);		\
	next_cmd = its_post_commands(its);				\
	raw_spin_unlock_irqrestore(&its->lock, flags);			\
									\
	if (its_wait_for_range_completion(its, rd_idx, next_cmd))	\
		pr_err_ratelimited("ITS cmd %ps failed\n", builder);	\
}
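
/*
 * For reference, BUILD_SINGLE_CMD_FUNC(its_send_single_command,
 * its_cmd_builder_t, struct its_collection, its_build_sync_cmd)
 * expands to a function that, under its->lock, allocates a queue
 * entry, runs the builder to encode the command, optionally appends a
 * SYNC (or VSYNC, for the virtual variant) aimed at whatever the
 * builder returned, bumps GITS_CWRITER, and then polls GITS_CREADR
 * until both commands have been consumed.
 */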

static void its_build_sync_cmd(struct its_node *its,
			       struct its_cmd_block *sync_cmd,
			       struct its_collection *sync_col)
{
	its_encode_cmd(sync_cmd, GITS_CMD_SYNC);
	its_encode_target(sync_cmd, sync_col->target_address);

	its_fixup_cmd(sync_cmd);
}

static BUILD_SINGLE_CMD_FUNC(its_send_single_command, its_cmd_builder_t,
			     struct its_collection, its_build_sync_cmd)

static void its_build_vsync_cmd(struct its_node *its,
				struct its_cmd_block *sync_cmd,
				struct its_vpe *sync_vpe)
{
	its_encode_cmd(sync_cmd, GITS_CMD_VSYNC);
	its_encode_vpeid(sync_cmd, sync_vpe->vpe_id);

	its_fixup_cmd(sync_cmd);
}

static BUILD_SINGLE_CMD_FUNC(its_send_single_vcommand, its_cmd_vbuilder_t,
			     struct its_vpe, its_build_vsync_cmd)

static void its_send_int(struct its_device *dev, u32 event_id)
{
	struct its_cmd_desc desc;

	desc.its_int_cmd.dev = dev;
	desc.its_int_cmd.event_id = event_id;

	its_send_single_command(dev->its, its_build_int_cmd, &desc);
}

static void its_send_clear(struct its_device *dev, u32 event_id)
{
	struct its_cmd_desc desc;

	desc.its_clear_cmd.dev = dev;
	desc.its_clear_cmd.event_id = event_id;

	its_send_single_command(dev->its, its_build_clear_cmd, &desc);
}

static void its_send_inv(struct its_device *dev, u32 event_id)
{
	struct its_cmd_desc desc;

	desc.its_inv_cmd.dev = dev;
	desc.its_inv_cmd.event_id = event_id;

	its_send_single_command(dev->its, its_build_inv_cmd, &desc);
}

static void its_send_mapd(struct its_device *dev, int valid)
{
	struct its_cmd_desc desc;

	desc.its_mapd_cmd.dev = dev;
	desc.its_mapd_cmd.valid = !!valid;

	its_send_single_command(dev->its, its_build_mapd_cmd, &desc);
}

static void its_send_mapc(struct its_node *its, struct its_collection *col,
			  int valid)
{
	struct its_cmd_desc desc;

	desc.its_mapc_cmd.col = col;
	desc.its_mapc_cmd.valid = !!valid;

	its_send_single_command(its, its_build_mapc_cmd, &desc);
}

static void its_send_mapti(struct its_device *dev, u32 irq_id, u32 id)
{
	struct its_cmd_desc desc;

	desc.its_mapti_cmd.dev = dev;
	desc.its_mapti_cmd.phys_id = irq_id;
	desc.its_mapti_cmd.event_id = id;

	its_send_single_command(dev->its, its_build_mapti_cmd, &desc);
}

static void its_send_movi(struct its_device *dev,
			  struct its_collection *col, u32 id)
{
	struct its_cmd_desc desc;

	desc.its_movi_cmd.dev = dev;
	desc.its_movi_cmd.col = col;
	desc.its_movi_cmd.event_id = id;

	its_send_single_command(dev->its, its_build_movi_cmd, &desc);
}

static void its_send_discard(struct its_device *dev, u32 id)
{
	struct its_cmd_desc desc;

	desc.its_discard_cmd.dev = dev;
	desc.its_discard_cmd.event_id = id;

	its_send_single_command(dev->its, its_build_discard_cmd, &desc);
}

static void its_send_invall(struct its_node *its, struct its_collection *col)
{
	struct its_cmd_desc desc;

	desc.its_invall_cmd.col = col;

	its_send_single_command(its, its_build_invall_cmd, &desc);
}

static void its_send_vmapti(struct its_device *dev, u32 id)
{
	struct its_vlpi_map *map = &dev->event_map.vlpi_maps[id];
	struct its_cmd_desc desc;

	desc.its_vmapti_cmd.vpe = map->vpe;
	desc.its_vmapti_cmd.dev = dev;
	desc.its_vmapti_cmd.virt_id = map->vintid;
	desc.its_vmapti_cmd.event_id = id;
	desc.its_vmapti_cmd.db_enabled = map->db_enabled;

	its_send_single_vcommand(dev->its, its_build_vmapti_cmd, &desc);
}

static void its_send_vmovi(struct its_device *dev, u32 id)
{
	struct its_vlpi_map *map = &dev->event_map.vlpi_maps[id];
	struct its_cmd_desc desc;

	desc.its_vmovi_cmd.vpe = map->vpe;
	desc.its_vmovi_cmd.dev = dev;
	desc.its_vmovi_cmd.event_id = id;
	desc.its_vmovi_cmd.db_enabled = map->db_enabled;

	its_send_single_vcommand(dev->its, its_build_vmovi_cmd, &desc);
}

static void its_send_vmapp(struct its_node *its,
			   struct its_vpe *vpe, bool valid)
{
	struct its_cmd_desc desc;

	desc.its_vmapp_cmd.vpe = vpe;
	desc.its_vmapp_cmd.valid = valid;
	desc.its_vmapp_cmd.col = &its->collections[vpe->col_idx];

	its_send_single_vcommand(its, its_build_vmapp_cmd, &desc);
}

static void its_send_vmovp(struct its_vpe *vpe)
{
	struct its_cmd_desc desc = {};
	struct its_node *its;
	unsigned long flags;
	int col_id = vpe->col_idx;

	desc.its_vmovp_cmd.vpe = vpe;

	if (!its_list_map) {
		its = list_first_entry(&its_nodes, struct its_node, entry);
		desc.its_vmovp_cmd.col = &its->collections[col_id];
		its_send_single_vcommand(its, its_build_vmovp_cmd, &desc);
		return;
	}

	/*
	 * Yet another marvel of the architecture. If using the
	 * its_list "feature", we need to make sure that all ITSs
	 * receive all VMOVP commands in the same order. The only way
	 * to guarantee this is to make vmovp a serialization point.
	 *
	 * Wall <-- Head.
	 */
	raw_spin_lock_irqsave(&vmovp_lock, flags);

	desc.its_vmovp_cmd.seq_num = vmovp_seq_num++;
	desc.its_vmovp_cmd.its_list = get_its_list(vpe->its_vm);

	/* Emit VMOVPs */
	list_for_each_entry(its, &its_nodes, entry) {
		if (!its->is_v4)
			continue;

		if (!vpe->its_vm->vlpi_count[its->list_nr])
			continue;

		desc.its_vmovp_cmd.col = &its->collections[col_id];
		its_send_single_vcommand(its, its_build_vmovp_cmd, &desc);
	}

	raw_spin_unlock_irqrestore(&vmovp_lock, flags);
}

static void its_send_vinvall(struct its_node *its, struct its_vpe *vpe)
{
	struct its_cmd_desc desc;

	desc.its_vinvall_cmd.vpe = vpe;
	its_send_single_vcommand(its, its_build_vinvall_cmd, &desc);
}

/*
 * irqchip functions - assumes MSI, mostly.
 */

static inline u32 its_get_event_id(struct irq_data *d)
{
	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
	return d->hwirq - its_dev->event_map.lpi_base;
}

static void lpi_write_config(struct irq_data *d, u8 clr, u8 set)
{
	irq_hw_number_t hwirq;
	struct page *prop_page;
	u8 *cfg;

	if (irqd_is_forwarded_to_vcpu(d)) {
		struct its_device *its_dev = irq_data_get_irq_chip_data(d);
		u32 event = its_get_event_id(d);
		struct its_vlpi_map *map;

		prop_page = its_dev->event_map.vm->vprop_page;
		map = &its_dev->event_map.vlpi_maps[event];
		hwirq = map->vintid;

		/* Remember the updated property */
		map->properties &= ~clr;
		map->properties |= set | LPI_PROP_GROUP1;
	} else {
		prop_page = gic_rdists->prop_page;
		hwirq = d->hwirq;
	}

	cfg = page_address(prop_page) + hwirq - 8192;
	*cfg &= ~clr;
	*cfg |= set | LPI_PROP_GROUP1;

	/*
	 * Make the above write visible to the redistributors.
	 * And yes, we're flushing exactly: One. Single. Byte.
	 * Humpf...
	 */
	if (gic_rdists->flags & RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING)
		gic_flush_dcache_to_poc(cfg, sizeof(*cfg));
	else
		dsb(ishst);
}
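
/*
 * Example: physical LPI 8192 (the lowest LPI INTID) is configured by
 * byte 0 of the property table, hence the 'hwirq - 8192' offset
 * above.
 */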

static void lpi_update_config(struct irq_data *d, u8 clr, u8 set)
{
	struct its_device *its_dev = irq_data_get_irq_chip_data(d);

	lpi_write_config(d, clr, set);
	its_send_inv(its_dev, its_get_event_id(d));
}

static void its_vlpi_set_doorbell(struct irq_data *d, bool enable)
{
	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
	u32 event = its_get_event_id(d);

	if (its_dev->event_map.vlpi_maps[event].db_enabled == enable)
		return;

	its_dev->event_map.vlpi_maps[event].db_enabled = enable;

	/*
	 * More fun with the architecture:
	 *
	 * Ideally, we'd issue a VMAPTI to set the doorbell to its LPI
	 * value or to 1023, depending on the enable bit. But that
	 * would be issuing a mapping for an /existing/ DevID+EventID
	 * pair, which is UNPREDICTABLE. Instead, let's issue a VMOVI
	 * to the /same/ vPE, using this opportunity to adjust the
	 * doorbell. Mouahahahaha. We loves it, Precious.
	 */
	its_send_vmovi(its_dev, event);
}

static void its_mask_irq(struct irq_data *d)
{
	if (irqd_is_forwarded_to_vcpu(d))
		its_vlpi_set_doorbell(d, false);

	lpi_update_config(d, LPI_PROP_ENABLED, 0);
}

static void its_unmask_irq(struct irq_data *d)
{
	if (irqd_is_forwarded_to_vcpu(d))
		its_vlpi_set_doorbell(d, true);

	lpi_update_config(d, 0, LPI_PROP_ENABLED);
}

static int its_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
			    bool force)
{
	unsigned int cpu;
	const struct cpumask *cpu_mask = cpu_online_mask;
	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
	struct its_collection *target_col;
	u32 id = its_get_event_id(d);

	/* A forwarded interrupt should use irq_set_vcpu_affinity */
	if (irqd_is_forwarded_to_vcpu(d))
		return -EINVAL;

	/* lpi cannot be routed to a redistributor that is on a foreign node */
	if (its_dev->its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144) {
		if (its_dev->its->numa_node >= 0) {
			cpu_mask = cpumask_of_node(its_dev->its->numa_node);
			if (!cpumask_intersects(mask_val, cpu_mask))
				return -EINVAL;
		}
	}

	cpu = cpumask_any_and(mask_val, cpu_mask);

	if (cpu >= nr_cpu_ids)
		return -EINVAL;

	/* don't set the affinity when the target cpu is same as current one */
	if (cpu != its_dev->event_map.col_map[id]) {
		target_col = &its_dev->its->collections[cpu];
		its_send_movi(its_dev, target_col, id);
		its_dev->event_map.col_map[id] = cpu;
		irq_data_update_effective_affinity(d, cpumask_of(cpu));
	}

	return IRQ_SET_MASK_OK_DONE;
}

static u64 its_irq_get_msi_base(struct its_device *its_dev)
{
	struct its_node *its = its_dev->its;

	return its->phys_base + GITS_TRANSLATER;
}

static void its_irq_compose_msi_msg(struct irq_data *d, struct msi_msg *msg)
{
	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
	struct its_node *its;
	u64 addr;

	its = its_dev->its;
	addr = its->get_msi_base(its_dev);

	msg->address_lo = lower_32_bits(addr);
	msg->address_hi = upper_32_bits(addr);
	msg->data = its_get_event_id(d);

	iommu_dma_map_msi_msg(d->irq, msg);
}

static int its_irq_set_irqchip_state(struct irq_data *d,
				     enum irqchip_irq_state which,
				     bool state)
{
	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
	u32 event = its_get_event_id(d);

	if (which != IRQCHIP_STATE_PENDING)
		return -EINVAL;

	if (state)
		its_send_int(its_dev, event);
	else
		its_send_clear(its_dev, event);

	return 0;
}

static void its_map_vm(struct its_node *its, struct its_vm *vm)
{
	unsigned long flags;

	/* Not using the ITS list? Everything is always mapped. */
	if (!its_list_map)
		return;

	raw_spin_lock_irqsave(&vmovp_lock, flags);

	/*
	 * If the VM wasn't mapped yet, iterate over the vpes and get
	 * them mapped now.
	 */
	vm->vlpi_count[its->list_nr]++;

	if (vm->vlpi_count[its->list_nr] == 1) {
		int i;

		for (i = 0; i < vm->nr_vpes; i++) {
			struct its_vpe *vpe = vm->vpes[i];
			struct irq_data *d = irq_get_irq_data(vpe->irq);

			/* Map the VPE to the first possible CPU */
			vpe->col_idx = cpumask_first(cpu_online_mask);
			its_send_vmapp(its, vpe, true);
			its_send_vinvall(its, vpe);
			irq_data_update_effective_affinity(d, cpumask_of(vpe->col_idx));
		}
	}

	raw_spin_unlock_irqrestore(&vmovp_lock, flags);
}

static void its_unmap_vm(struct its_node *its, struct its_vm *vm)
{
	unsigned long flags;

	/* Not using the ITS list? Everything is always mapped. */
	if (!its_list_map)
		return;

	raw_spin_lock_irqsave(&vmovp_lock, flags);

	if (!--vm->vlpi_count[its->list_nr]) {
		int i;

		for (i = 0; i < vm->nr_vpes; i++)
			its_send_vmapp(its, vm->vpes[i], false);
	}

	raw_spin_unlock_irqrestore(&vmovp_lock, flags);
}

static int its_vlpi_map(struct irq_data *d, struct its_cmd_info *info)
{
	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
	u32 event = its_get_event_id(d);
	int ret = 0;

	if (!info->map)
		return -EINVAL;

	mutex_lock(&its_dev->event_map.vlpi_lock);

	if (!its_dev->event_map.vm) {
		struct its_vlpi_map *maps;

		maps = kcalloc(its_dev->event_map.nr_lpis, sizeof(*maps),
			       GFP_KERNEL);
		if (!maps) {
			ret = -ENOMEM;
			goto out;
		}

		its_dev->event_map.vm = info->map->vm;
		its_dev->event_map.vlpi_maps = maps;
	} else if (its_dev->event_map.vm != info->map->vm) {
		ret = -EINVAL;
		goto out;
	}

	/* Get our private copy of the mapping information */
	its_dev->event_map.vlpi_maps[event] = *info->map;

	if (irqd_is_forwarded_to_vcpu(d)) {
		/* Already mapped, move it around */
		its_send_vmovi(its_dev, event);
	} else {
		/* Ensure all the VPEs are mapped on this ITS */
		its_map_vm(its_dev->its, info->map->vm);

		/*
		 * Flag the interrupt as forwarded so that we can
		 * start poking the virtual property table.
		 */
		irqd_set_forwarded_to_vcpu(d);

		/* Write out the property to the prop table */
		lpi_write_config(d, 0xff, info->map->properties);

		/* Drop the physical mapping */
		its_send_discard(its_dev, event);

		/* and install the virtual one */
		its_send_vmapti(its_dev, event);

		/* Increment the number of VLPIs */
		its_dev->event_map.nr_vlpis++;
	}

out:
	mutex_unlock(&its_dev->event_map.vlpi_lock);
	return ret;
}

static int its_vlpi_get(struct irq_data *d, struct its_cmd_info *info)
{
	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
	u32 event = its_get_event_id(d);
	int ret = 0;

	mutex_lock(&its_dev->event_map.vlpi_lock);

	if (!its_dev->event_map.vm ||
	    !its_dev->event_map.vlpi_maps[event].vm) {
		ret = -EINVAL;
		goto out;
	}

	/* Copy our mapping information to the incoming request */
	*info->map = its_dev->event_map.vlpi_maps[event];

out:
	mutex_unlock(&its_dev->event_map.vlpi_lock);
	return ret;
}

static int its_vlpi_unmap(struct irq_data *d)
{
	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
	u32 event = its_get_event_id(d);
	int ret = 0;

	mutex_lock(&its_dev->event_map.vlpi_lock);

	if (!its_dev->event_map.vm || !irqd_is_forwarded_to_vcpu(d)) {
		ret = -EINVAL;
		goto out;
	}

	/* Drop the virtual mapping */
	its_send_discard(its_dev, event);

	/* and restore the physical one */
	irqd_clr_forwarded_to_vcpu(d);
	its_send_mapti(its_dev, d->hwirq, event);
	lpi_update_config(d, 0xff, (LPI_PROP_DEFAULT_PRIO |
				    LPI_PROP_ENABLED |
				    LPI_PROP_GROUP1));

	/* Potentially unmap the VM from this ITS */
	its_unmap_vm(its_dev->its, its_dev->event_map.vm);

	/*
	 * Drop the refcount and make the device available again if
	 * this was the last VLPI.
	 */
	if (!--its_dev->event_map.nr_vlpis) {
		its_dev->event_map.vm = NULL;
		kfree(its_dev->event_map.vlpi_maps);
	}

out:
	mutex_unlock(&its_dev->event_map.vlpi_lock);
	return ret;
}

static int its_vlpi_prop_update(struct irq_data *d, struct its_cmd_info *info)
{
	struct its_device *its_dev = irq_data_get_irq_chip_data(d);

	if (!its_dev->event_map.vm || !irqd_is_forwarded_to_vcpu(d))
		return -EINVAL;

	if (info->cmd_type == PROP_UPDATE_AND_INV_VLPI)
		lpi_update_config(d, 0xff, info->config);
	else
		lpi_write_config(d, 0xff, info->config);
	its_vlpi_set_doorbell(d, !!(info->config & LPI_PROP_ENABLED));

	return 0;
}

static int its_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu_info)
{
	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
	struct its_cmd_info *info = vcpu_info;

	/* Need a v4 ITS */
	if (!its_dev->its->is_v4)
		return -EINVAL;

	/* Unmap request? */
	if (!info)
		return its_vlpi_unmap(d);

	switch (info->cmd_type) {
	case MAP_VLPI:
		return its_vlpi_map(d, info);

	case GET_VLPI:
		return its_vlpi_get(d, info);

	case PROP_UPDATE_VLPI:
	case PROP_UPDATE_AND_INV_VLPI:
		return its_vlpi_prop_update(d, info);

	default:
		return -EINVAL;
	}
}

static struct irq_chip its_irq_chip = {
	.name			= "ITS",
	.irq_mask		= its_mask_irq,
	.irq_unmask		= its_unmask_irq,
	.irq_eoi		= irq_chip_eoi_parent,
	.irq_set_affinity	= its_set_affinity,
	.irq_compose_msi_msg	= its_irq_compose_msi_msg,
	.irq_set_irqchip_state	= its_irq_set_irqchip_state,
	.irq_set_vcpu_affinity	= its_irq_set_vcpu_affinity,
};

/*
 * How we allocate LPIs:
 *
 * lpi_range_list contains ranges of LPIs that are available to
 * allocate from. To allocate LPIs, just pick the first range that
 * fits the required allocation, and reduce it by the required
 * amount. Once empty, remove the range from the list.
 *
 * To free a range of LPIs, add a free range to the list, sort it and
 * merge the result if the new range happens to be adjacent to an
 * already free block.
 *
 * The consequence of the above is that allocation cost is low, but
 * freeing is expensive. We assume that freeing rarely occurs.
 */
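/*
 * Example walk-through (illustrative): the allocator starts with one
 * free range [8192, 8192 + N). alloc_lpi_range(32, &base) returns
 * base == 8192 and shrinks the range to start at 8224. Freeing those
 * 32 LPIs later re-inserts [8192, 8224), and merge_lpi_ranges() glues
 * it back onto the adjacent free block.
 */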
#define ITS_MAX_LPI_NRBITS	16 /* 64K LPIs */

static DEFINE_MUTEX(lpi_range_lock);
static LIST_HEAD(lpi_range_list);

struct lpi_range {
	struct list_head	entry;
	u32			base_id;
	u32			span;
};

static struct lpi_range *mk_lpi_range(u32 base, u32 span)
{
	struct lpi_range *range;

	range = kzalloc(sizeof(*range), GFP_KERNEL);
	if (range) {
		INIT_LIST_HEAD(&range->entry);
		range->base_id = base;
		range->span = span;
	}

	return range;
}

static int lpi_range_cmp(void *priv, struct list_head *a, struct list_head *b)
{
	struct lpi_range *ra, *rb;

	ra = container_of(a, struct lpi_range, entry);
	rb = container_of(b, struct lpi_range, entry);

	return ra->base_id - rb->base_id;
}

static void merge_lpi_ranges(void)
{
	struct lpi_range *range, *tmp;

	list_for_each_entry_safe(range, tmp, &lpi_range_list, entry) {
		if (!list_is_last(&range->entry, &lpi_range_list) &&
		    (tmp->base_id == (range->base_id + range->span))) {
			tmp->base_id = range->base_id;
			tmp->span += range->span;
			list_del(&range->entry);
			kfree(range);
		}
	}
}

static int alloc_lpi_range(u32 nr_lpis, u32 *base)
{
	struct lpi_range *range, *tmp;
	int err = -ENOSPC;

	mutex_lock(&lpi_range_lock);

	list_for_each_entry_safe(range, tmp, &lpi_range_list, entry) {
		if (range->span >= nr_lpis) {
			*base = range->base_id;
			range->base_id += nr_lpis;
			range->span -= nr_lpis;

			if (range->span == 0) {
				list_del(&range->entry);
				kfree(range);
			}

			err = 0;
			break;
		}
	}

	mutex_unlock(&lpi_range_lock);

	pr_debug("ITS: alloc %u:%u\n", *base, nr_lpis);
	return err;
}

static int free_lpi_range(u32 base, u32 nr_lpis)
{
	struct lpi_range *new;
	int err = 0;

	mutex_lock(&lpi_range_lock);

	new = mk_lpi_range(base, nr_lpis);
	if (!new) {
		err = -ENOMEM;
		goto out;
	}

	list_add(&new->entry, &lpi_range_list);
	list_sort(NULL, &lpi_range_list, lpi_range_cmp);
	merge_lpi_ranges();
out:
	mutex_unlock(&lpi_range_lock);
	return err;
}

static int __init its_lpi_init(u32 id_bits)
{
	u32 lpis = (1UL << id_bits) - 8192;
	u32 numlpis;
	int err;

	numlpis = 1UL << GICD_TYPER_NUM_LPIS(gic_rdists->gicd_typer);

	if (numlpis > 2 && !WARN_ON(numlpis > lpis)) {
		lpis = numlpis;
		pr_info("ITS: Using hypervisor restricted LPI range [%u]\n",
			lpis);
	}

	/*
	 * Initializing the allocator is just the same as freeing the
	 * full range of LPIs.
	 */
	err = free_lpi_range(8192, lpis);
	pr_debug("ITS: Allocator initialized for %u LPIs\n", lpis);
	return err;
}

static unsigned long *its_lpi_alloc(int nr_irqs, u32 *base, int *nr_ids)
{
	unsigned long *bitmap = NULL;
	int err = 0;

	do {
		err = alloc_lpi_range(nr_irqs, base);
		if (!err)
			break;

		nr_irqs /= 2;
	} while (nr_irqs > 0);

	if (!nr_irqs)
		err = -ENOSPC;

	if (err)
		goto out;

	bitmap = kcalloc(BITS_TO_LONGS(nr_irqs), sizeof (long), GFP_ATOMIC);
	if (!bitmap)
		goto out;

	*nr_ids = nr_irqs;

out:
	if (!bitmap)
		*base = *nr_ids = 0;

	return bitmap;
}
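
/*
 * Note: if a 96-LPI allocation cannot be satisfied above, the request
 * is halved to 48, then 24, and so on until a free range fits (or
 * nothing is left and the allocation fails). Callers must check
 * *nr_ids for the number of LPIs actually granted.
 */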
1629
its_lpi_free(unsigned long * bitmap,u32 base,u32 nr_ids)1630 static void its_lpi_free(unsigned long *bitmap, u32 base, u32 nr_ids)
1631 {
1632 WARN_ON(free_lpi_range(base, nr_ids));
1633 kfree(bitmap);
1634 }
1635
its_allocate_prop_table(gfp_t gfp_flags)1636 static struct page *its_allocate_prop_table(gfp_t gfp_flags)
1637 {
1638 struct page *prop_page;
1639
1640 prop_page = alloc_pages(gfp_flags, get_order(LPI_PROPBASE_SZ));
1641 if (!prop_page)
1642 return NULL;
1643
1644 /* Priority 0xa0, Group-1, disabled */
1645 memset(page_address(prop_page),
1646 LPI_PROP_DEFAULT_PRIO | LPI_PROP_GROUP1,
1647 LPI_PROPBASE_SZ);
1648
1649 /* Make sure the GIC will observe the written configuration */
1650 gic_flush_dcache_to_poc(page_address(prop_page), LPI_PROPBASE_SZ);
1651
1652 return prop_page;
1653 }
1654
its_free_prop_table(struct page * prop_page)1655 static void its_free_prop_table(struct page *prop_page)
1656 {
1657 free_pages((unsigned long)page_address(prop_page),
1658 get_order(LPI_PROPBASE_SZ));
1659 }
1660
its_alloc_lpi_tables(void)1661 static int __init its_alloc_lpi_tables(void)
1662 {
1663 phys_addr_t paddr;
1664
1665 lpi_id_bits = min_t(u32, GICD_TYPER_ID_BITS(gic_rdists->gicd_typer),
1666 ITS_MAX_LPI_NRBITS);
1667 gic_rdists->prop_page = its_allocate_prop_table(GFP_NOWAIT);
1668 if (!gic_rdists->prop_page) {
1669 pr_err("Failed to allocate PROPBASE\n");
1670 return -ENOMEM;
1671 }
1672
1673 paddr = page_to_phys(gic_rdists->prop_page);
1674 pr_info("GIC: using LPI property table @%pa\n", &paddr);
1675
1676 return its_lpi_init(lpi_id_bits);
1677 }
1678
1679 static const char *its_base_type_string[] = {
1680 [GITS_BASER_TYPE_DEVICE] = "Devices",
1681 [GITS_BASER_TYPE_VCPU] = "Virtual CPUs",
1682 [GITS_BASER_TYPE_RESERVED3] = "Reserved (3)",
1683 [GITS_BASER_TYPE_COLLECTION] = "Interrupt Collections",
1684 [GITS_BASER_TYPE_RESERVED5] = "Reserved (5)",
1685 [GITS_BASER_TYPE_RESERVED6] = "Reserved (6)",
1686 [GITS_BASER_TYPE_RESERVED7] = "Reserved (7)",
1687 };
1688
its_read_baser(struct its_node * its,struct its_baser * baser)1689 static u64 its_read_baser(struct its_node *its, struct its_baser *baser)
1690 {
1691 u32 idx = baser - its->tables;
1692
1693 return gits_read_baser(its->base + GITS_BASER + (idx << 3));
1694 }
1695
its_write_baser(struct its_node * its,struct its_baser * baser,u64 val)1696 static void its_write_baser(struct its_node *its, struct its_baser *baser,
1697 u64 val)
1698 {
1699 u32 idx = baser - its->tables;
1700
1701 gits_write_baser(val, its->base + GITS_BASER + (idx << 3));
1702 baser->val = its_read_baser(its, baser);
1703 }
1704
its_setup_baser(struct its_node * its,struct its_baser * baser,u64 cache,u64 shr,u32 psz,u32 order,bool indirect)1705 static int its_setup_baser(struct its_node *its, struct its_baser *baser,
1706 u64 cache, u64 shr, u32 psz, u32 order,
1707 bool indirect)
1708 {
1709 u64 val = its_read_baser(its, baser);
1710 u64 esz = GITS_BASER_ENTRY_SIZE(val);
1711 u64 type = GITS_BASER_TYPE(val);
1712 u64 baser_phys, tmp;
1713 u32 alloc_pages;
1714 void *base;
1715
1716 retry_alloc_baser:
1717 alloc_pages = (PAGE_ORDER_TO_SIZE(order) / psz);
1718 if (alloc_pages > GITS_BASER_PAGES_MAX) {
1719 pr_warn("ITS@%pa: %s too large, reduce ITS pages %u->%u\n",
1720 &its->phys_base, its_base_type_string[type],
1721 alloc_pages, GITS_BASER_PAGES_MAX);
1722 alloc_pages = GITS_BASER_PAGES_MAX;
1723 order = get_order(GITS_BASER_PAGES_MAX * psz);
1724 }
1725
1726 base = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
1727 if (!base)
1728 return -ENOMEM;
1729
1730 baser_phys = virt_to_phys(base);
1731
1732 /* Check if the physical address of the memory is above 48bits */
1733 if (IS_ENABLED(CONFIG_ARM64_64K_PAGES) && (baser_phys >> 48)) {
1734
1735 /* 52bit PA is supported only when PageSize=64K */
1736 if (psz != SZ_64K) {
1737 pr_err("ITS: no 52bit PA support when psz=%d\n", psz);
1738 free_pages((unsigned long)base, order);
1739 return -ENXIO;
1740 }
1741
1742 /* Convert 52bit PA to 48bit field */
1743 baser_phys = GITS_BASER_PHYS_52_to_48(baser_phys);
1744 }
1745
1746 retry_baser:
1747 val = (baser_phys |
1748 (type << GITS_BASER_TYPE_SHIFT) |
1749 ((esz - 1) << GITS_BASER_ENTRY_SIZE_SHIFT) |
1750 ((alloc_pages - 1) << GITS_BASER_PAGES_SHIFT) |
1751 cache |
1752 shr |
1753 GITS_BASER_VALID);
1754
1755 val |= indirect ? GITS_BASER_INDIRECT : 0x0;
1756
1757 switch (psz) {
1758 case SZ_4K:
1759 val |= GITS_BASER_PAGE_SIZE_4K;
1760 break;
1761 case SZ_16K:
1762 val |= GITS_BASER_PAGE_SIZE_16K;
1763 break;
1764 case SZ_64K:
1765 val |= GITS_BASER_PAGE_SIZE_64K;
1766 break;
1767 }
1768
1769 its_write_baser(its, baser, val);
1770 tmp = baser->val;
1771
1772 if ((val ^ tmp) & GITS_BASER_SHAREABILITY_MASK) {
1773 /*
1774 * Shareability didn't stick. Just use
1775 * whatever the read reported, which is likely
1776 * to be the only thing this redistributor
1777 * supports. If that's zero, make it
1778 * non-cacheable as well.
1779 */
1780 shr = tmp & GITS_BASER_SHAREABILITY_MASK;
1781 if (!shr) {
1782 cache = GITS_BASER_nC;
1783 gic_flush_dcache_to_poc(base, PAGE_ORDER_TO_SIZE(order));
1784 }
1785 goto retry_baser;
1786 }
1787
1788 if ((val ^ tmp) & GITS_BASER_PAGE_SIZE_MASK) {
1789 /*
1790 * Page size didn't stick. Let's try a smaller
1791 * size and retry. If we reach 4K, then
1792 * something is horribly wrong...
1793 */
1794 free_pages((unsigned long)base, order);
1795 baser->base = NULL;
1796
1797 switch (psz) {
1798 case SZ_16K:
1799 psz = SZ_4K;
1800 goto retry_alloc_baser;
1801 case SZ_64K:
1802 psz = SZ_16K;
1803 goto retry_alloc_baser;
1804 }
1805 }
1806
1807 if (val != tmp) {
1808 pr_err("ITS@%pa: %s doesn't stick: %llx %llx\n",
1809 &its->phys_base, its_base_type_string[type],
1810 val, tmp);
1811 free_pages((unsigned long)base, order);
1812 return -ENXIO;
1813 }
1814
1815 baser->order = order;
1816 baser->base = base;
1817 baser->psz = psz;
1818 tmp = indirect ? GITS_LVL1_ENTRY_SIZE : esz;
1819
1820 pr_info("ITS@%pa: allocated %d %s @%lx (%s, esz %d, psz %dK, shr %d)\n",
1821 &its->phys_base, (int)(PAGE_ORDER_TO_SIZE(order) / (int)tmp),
1822 its_base_type_string[type],
1823 (unsigned long)virt_to_phys(base),
1824 indirect ? "indirect" : "flat", (int)esz,
1825 psz / SZ_1K, (int)shr >> GITS_BASER_SHAREABILITY_SHIFT);
1826
1827 return 0;
1828 }
1829
its_parse_indirect_baser(struct its_node * its,struct its_baser * baser,u32 psz,u32 * order,u32 ids)1830 static bool its_parse_indirect_baser(struct its_node *its,
1831 struct its_baser *baser,
1832 u32 psz, u32 *order, u32 ids)
1833 {
1834 u64 tmp = its_read_baser(its, baser);
1835 u64 type = GITS_BASER_TYPE(tmp);
1836 u64 esz = GITS_BASER_ENTRY_SIZE(tmp);
1837 u64 val = GITS_BASER_InnerShareable | GITS_BASER_RaWaWb;
1838 u32 new_order = *order;
1839 bool indirect = false;
1840
1841 /* No need to enable Indirection if memory requirement < (psz*2)bytes */
1842 if ((esz << ids) > (psz * 2)) {
1843 /*
1844 * Find out whether hw supports a single or two-level table by
1845 * table by reading bit at offset '62' after writing '1' to it.
1846 */
1847 its_write_baser(its, baser, val | GITS_BASER_INDIRECT);
1848 indirect = !!(baser->val & GITS_BASER_INDIRECT);
1849
1850 if (indirect) {
1851 /*
1852 * The size of a lvl2 table is equal to the ITS page size,
1853 * 'psz'. To compute the lvl1 table size, subtract from 'ids'
1854 * (reported by the ITS hardware) the ID bits that a single
1855 * lvl2 table resolves; the lvl1 table then needs one
1856 * GITS_LVL1_ENTRY_SIZE entry per remaining ID.
1857 */
1858 ids -= ilog2(psz / (int)esz);
1859 esz = GITS_LVL1_ENTRY_SIZE;
1860 }
1861 }
1862
1863 /*
1864 * Allocate as many entries as required to fit the
1865 * range of device IDs that the ITS can grok... The ID
1866 * space being incredibly sparse, this results in a
1867 * massive waste of memory if the two-level device table
1868 * feature is not supported by the hardware.
1869 */
1870 new_order = max_t(u32, get_order(esz << ids), new_order);
1871 if (new_order >= MAX_ORDER) {
1872 new_order = MAX_ORDER - 1;
1873 ids = ilog2(PAGE_ORDER_TO_SIZE(new_order) / (int)esz);
1874 pr_warn("ITS@%pa: %s Table too large, reduce ids %u->%u\n",
1875 &its->phys_base, its_base_type_string[type],
1876 its->device_ids, ids);
1877 }
1878
1879 *order = new_order;
1880
1881 return indirect;
1882 }
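/*
 * Worked example for the sizing above, with illustrative numbers:
 * esz = 8 bytes, ids = 20, psz = SZ_64K. A flat table would need
 * 8 << 20 = 8MB. One 64kB lvl2 page resolves ilog2(SZ_64K / 8) = 13
 * ID bits, so an indirect lvl1 table only needs 1 << (20 - 13) = 128
 * entries of GITS_LVL1_ENTRY_SIZE bytes, with the lvl2 pages
 * allocated on demand by its_alloc_table_entry().
 */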
1883
1884 static void its_free_tables(struct its_node *its)
1885 {
1886 int i;
1887
1888 for (i = 0; i < GITS_BASER_NR_REGS; i++) {
1889 if (its->tables[i].base) {
1890 free_pages((unsigned long)its->tables[i].base,
1891 its->tables[i].order);
1892 its->tables[i].base = NULL;
1893 }
1894 }
1895 }
1896
1897 static int its_alloc_tables(struct its_node *its)
1898 {
1899 u64 shr = GITS_BASER_InnerShareable;
1900 u64 cache = GITS_BASER_RaWaWb;
1901 u32 psz = SZ_64K;
1902 int err, i;
1903
1904 if (its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_22375)
1905 /* erratum 24313: ignore memory access type */
1906 cache = GITS_BASER_nCnB;
1907
1908 for (i = 0; i < GITS_BASER_NR_REGS; i++) {
1909 struct its_baser *baser = its->tables + i;
1910 u64 val = its_read_baser(its, baser);
1911 u64 type = GITS_BASER_TYPE(val);
1912 u32 order = get_order(psz);
1913 bool indirect = false;
1914
1915 switch (type) {
1916 case GITS_BASER_TYPE_NONE:
1917 continue;
1918
1919 case GITS_BASER_TYPE_DEVICE:
1920 indirect = its_parse_indirect_baser(its, baser,
1921 psz, &order,
1922 its->device_ids);
1923 break;
1924
1925 case GITS_BASER_TYPE_VCPU:
1926 indirect = its_parse_indirect_baser(its, baser,
1927 psz, &order,
1928 ITS_MAX_VPEID_BITS);
1929 break;
1930 }
1931
1932 err = its_setup_baser(its, baser, cache, shr, psz, order, indirect);
1933 if (err < 0) {
1934 its_free_tables(its);
1935 return err;
1936 }
1937
1938 /* Update settings which will be used for next BASERn */
1939 psz = baser->psz;
1940 cache = baser->val & GITS_BASER_CACHEABILITY_MASK;
1941 shr = baser->val & GITS_BASER_SHAREABILITY_MASK;
1942 }
1943
1944 return 0;
1945 }
1946
1947 static int its_alloc_collections(struct its_node *its)
1948 {
1949 int i;
1950
1951 its->collections = kcalloc(nr_cpu_ids, sizeof(*its->collections),
1952 GFP_KERNEL);
1953 if (!its->collections)
1954 return -ENOMEM;
1955
1956 for (i = 0; i < nr_cpu_ids; i++)
1957 its->collections[i].target_address = ~0ULL;
1958
1959 return 0;
1960 }
1961
1962 static struct page *its_allocate_pending_table(gfp_t gfp_flags)
1963 {
1964 struct page *pend_page;
1965 /*
1966 * The pending pages have to be at least 64kB aligned,
1967 * hence the 'max(LPI_PENDBASE_SZ, SZ_64K)' below.
1968 */
1969 pend_page = alloc_pages(gfp_flags | __GFP_ZERO,
1970 get_order(max_t(u32, LPI_PENDBASE_SZ, SZ_64K)));
1971 if (!pend_page)
1972 return NULL;
1973
1974 /* Make sure the GIC will observe the zero-ed page */
1975 gic_flush_dcache_to_poc(page_address(pend_page), LPI_PENDBASE_SZ);
1976
1977 return pend_page;
1978 }
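/*
 * Sizing example (illustrative): with lpi_id_bits = 16,
 * LPI_PENDBASE_SZ = ALIGN(BIT(16) / 8, SZ_64K) = 64kB, i.e. one
 * pending bit per possible interrupt ID, rounded up to the 64kB
 * alignment that GICR_PENDBASER requires.
 */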
1979
1980 static void its_free_pending_table(struct page *pt)
1981 {
1982 free_pages((unsigned long)page_address(pt),
1983 get_order(max_t(u32, LPI_PENDBASE_SZ, SZ_64K)));
1984 }
1985
1986 static u64 its_clear_vpend_valid(void __iomem *vlpi_base)
1987 {
1988 u32 count = 1000000; /* 1s! */
1989 bool clean;
1990 u64 val;
1991
1992 val = gits_read_vpendbaser(vlpi_base + GICR_VPENDBASER);
1993 val &= ~GICR_VPENDBASER_Valid;
1994 gits_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER);
1995
1996 do {
1997 val = gits_read_vpendbaser(vlpi_base + GICR_VPENDBASER);
1998 clean = !(val & GICR_VPENDBASER_Dirty);
1999 if (!clean) {
2000 count--;
2001 cpu_relax();
2002 udelay(1);
2003 }
2004 } while (!clean && count);
2005
2006 return val;
2007 }
2008
2009 static void its_cpu_init_lpis(void)
2010 {
2011 void __iomem *rbase = gic_data_rdist_rd_base();
2012 struct page *pend_page;
2013 u64 val, tmp;
2014
2015 /* If we didn't allocate the pending table yet, do it now */
2016 pend_page = gic_data_rdist()->pend_page;
2017 if (!pend_page) {
2018 phys_addr_t paddr;
2019
2020 pend_page = its_allocate_pending_table(GFP_NOWAIT);
2021 if (!pend_page) {
2022 pr_err("Failed to allocate PENDBASE for CPU%d\n",
2023 smp_processor_id());
2024 return;
2025 }
2026
2027 paddr = page_to_phys(pend_page);
2028 pr_info("CPU%d: using LPI pending table @%pa\n",
2029 smp_processor_id(), &paddr);
2030 gic_data_rdist()->pend_page = pend_page;
2031 }
2032
2033 /* set PROPBASE */
2034 val = (page_to_phys(gic_rdists->prop_page) |
2035 GICR_PROPBASER_InnerShareable |
2036 GICR_PROPBASER_RaWaWb |
2037 ((LPI_NRBITS - 1) & GICR_PROPBASER_IDBITS_MASK));
2038
2039 gicr_write_propbaser(val, rbase + GICR_PROPBASER);
2040 tmp = gicr_read_propbaser(rbase + GICR_PROPBASER);
2041
2042 if ((tmp ^ val) & GICR_PROPBASER_SHAREABILITY_MASK) {
2043 if (!(tmp & GICR_PROPBASER_SHAREABILITY_MASK)) {
2044 /*
2045 * The HW reports non-shareable, we must
2046 * remove the cacheability attributes as
2047 * well.
2048 */
2049 val &= ~(GICR_PROPBASER_SHAREABILITY_MASK |
2050 GICR_PROPBASER_CACHEABILITY_MASK);
2051 val |= GICR_PROPBASER_nC;
2052 gicr_write_propbaser(val, rbase + GICR_PROPBASER);
2053 }
2054 pr_info_once("GIC: using cache flushing for LPI property table\n");
2055 gic_rdists->flags |= RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING;
2056 }
2057
2058 /* set PENDBASE */
2059 val = (page_to_phys(pend_page) |
2060 GICR_PENDBASER_InnerShareable |
2061 GICR_PENDBASER_RaWaWb);
2062
2063 gicr_write_pendbaser(val, rbase + GICR_PENDBASER);
2064 tmp = gicr_read_pendbaser(rbase + GICR_PENDBASER);
2065
2066 if (!(tmp & GICR_PENDBASER_SHAREABILITY_MASK)) {
2067 /*
2068 * The HW reports non-shareable, we must remove the
2069 * cacheability attributes as well.
2070 */
2071 val &= ~(GICR_PENDBASER_SHAREABILITY_MASK |
2072 GICR_PENDBASER_CACHEABILITY_MASK);
2073 val |= GICR_PENDBASER_nC;
2074 gicr_write_pendbaser(val, rbase + GICR_PENDBASER);
2075 }
2076
2077 /* Enable LPIs */
2078 val = readl_relaxed(rbase + GICR_CTLR);
2079 val |= GICR_CTLR_ENABLE_LPIS;
2080 writel_relaxed(val, rbase + GICR_CTLR);
2081
2082 if (gic_rdists->has_vlpis) {
2083 void __iomem *vlpi_base = gic_data_rdist_vlpi_base();
2084
2085 /*
2086 * It's possible for a CPU to receive VLPIs before it is
2087 * scheduled as a vPE, especially for the first CPU, and a
2088 * VLPI with an INTID larger than 2^(IDbits+1) will be treated
2089 * as out of range and dropped by the GIC.
2090 * So initialize IDbits to a known value to avoid dropping VLPIs.
2091 */
2092 val = (LPI_NRBITS - 1) & GICR_VPROPBASER_IDBITS_MASK;
2093 pr_debug("GICv4: CPU%d: Init IDbits to 0x%llx for GICR_VPROPBASER\n",
2094 smp_processor_id(), val);
2095 gits_write_vpropbaser(val, vlpi_base + GICR_VPROPBASER);
2096
2097 /*
2098 * Also clear the Valid bit of GICR_VPENDBASER, in case
2099 * stale programming was left behind and could end up
2100 * corrupting memory.
2101 */
2102 val = its_clear_vpend_valid(vlpi_base);
2103 WARN_ON(val & GICR_VPENDBASER_Dirty);
2104 }
2105
2106 /* Make sure the GIC has seen the above */
2107 dsb(sy);
2108 }
2109
2110 static void its_cpu_init_collection(struct its_node *its)
2111 {
2112 int cpu = smp_processor_id();
2113 u64 target;
2114
2115 /* avoid cross-node collections and their mapping */
2116 if (its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144) {
2117 struct device_node *cpu_node;
2118
2119 cpu_node = of_get_cpu_node(cpu, NULL);
2120 if (its->numa_node != NUMA_NO_NODE &&
2121 its->numa_node != of_node_to_nid(cpu_node))
2122 return;
2123 }
2124
2125 /*
2126 * We now have to bind each collection to its target
2127 * redistributor.
2128 */
2129 if (gic_read_typer(its->base + GITS_TYPER) & GITS_TYPER_PTA) {
2130 /*
2131 * This ITS wants the physical address of the
2132 * redistributor.
2133 */
2134 target = gic_data_rdist()->phys_base;
2135 } else {
2136 /* This ITS wants a linear CPU number. */
2137 target = gic_read_typer(gic_data_rdist_rd_base() + GICR_TYPER);
2138 target = GICR_TYPER_CPU_NUMBER(target) << 16;
2139 }
2140
2141 /* Perform collection mapping */
2142 its->collections[cpu].target_address = target;
2143 its->collections[cpu].col_id = cpu;
2144
2145 its_send_mapc(its, &its->collections[cpu], 1);
2146 its_send_invall(its, &its->collections[cpu]);
2147 }
2148
2149 static void its_cpu_init_collections(void)
2150 {
2151 struct its_node *its;
2152
2153 raw_spin_lock(&its_lock);
2154
2155 list_for_each_entry(its, &its_nodes, entry)
2156 its_cpu_init_collection(its);
2157
2158 raw_spin_unlock(&its_lock);
2159 }
2160
2161 static struct its_device *its_find_device(struct its_node *its, u32 dev_id)
2162 {
2163 struct its_device *its_dev = NULL, *tmp;
2164 unsigned long flags;
2165
2166 raw_spin_lock_irqsave(&its->lock, flags);
2167
2168 list_for_each_entry(tmp, &its->its_device_list, entry) {
2169 if (tmp->device_id == dev_id) {
2170 its_dev = tmp;
2171 break;
2172 }
2173 }
2174
2175 raw_spin_unlock_irqrestore(&its->lock, flags);
2176
2177 return its_dev;
2178 }
2179
2180 static struct its_baser *its_get_baser(struct its_node *its, u32 type)
2181 {
2182 int i;
2183
2184 for (i = 0; i < GITS_BASER_NR_REGS; i++) {
2185 if (GITS_BASER_TYPE(its->tables[i].val) == type)
2186 return &its->tables[i];
2187 }
2188
2189 return NULL;
2190 }
2191
2192 static bool its_alloc_table_entry(struct its_baser *baser, u32 id)
2193 {
2194 struct page *page;
2195 u32 esz, idx;
2196 __le64 *table;
2197
2198 /* Don't allow device id that exceeds single, flat table limit */
2199 esz = GITS_BASER_ENTRY_SIZE(baser->val);
2200 if (!(baser->val & GITS_BASER_INDIRECT))
2201 return (id < (PAGE_ORDER_TO_SIZE(baser->order) / esz));
2202
2203 /* Compute 1st level table index & check if that exceeds table limit */
2204 idx = id >> ilog2(baser->psz / esz);
2205 if (idx >= (PAGE_ORDER_TO_SIZE(baser->order) / GITS_LVL1_ENTRY_SIZE))
2206 return false;
2207
2208 table = baser->base;
2209
2210 /* Allocate memory for 2nd level table */
2211 if (!table[idx]) {
2212 page = alloc_pages(GFP_KERNEL | __GFP_ZERO, get_order(baser->psz));
2213 if (!page)
2214 return false;
2215
2216 /* Flush Lvl2 table to PoC if hw doesn't support coherency */
2217 if (!(baser->val & GITS_BASER_SHAREABILITY_MASK))
2218 gic_flush_dcache_to_poc(page_address(page), baser->psz);
2219
2220 table[idx] = cpu_to_le64(page_to_phys(page) | GITS_BASER_VALID);
2221
2222 /* Flush Lvl1 entry to PoC if hw doesn't support coherency */
2223 if (!(baser->val & GITS_BASER_SHAREABILITY_MASK))
2224 gic_flush_dcache_to_poc(table + idx, GITS_LVL1_ENTRY_SIZE);
2225
2226 /* Ensure updated table contents are visible to ITS hardware */
2227 dsb(sy);
2228 }
2229
2230 return true;
2231 }
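/*
 * Indexing example (illustrative numbers): with psz = SZ_64K and
 * esz = 8, a lvl2 page covers 8192 IDs, so id = 20000 selects lvl1
 * slot idx = 20000 >> 13 = 2, and the matching lvl2 page is only
 * allocated here on first use.
 */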
2232
2233 static bool its_alloc_device_table(struct its_node *its, u32 dev_id)
2234 {
2235 struct its_baser *baser;
2236
2237 baser = its_get_baser(its, GITS_BASER_TYPE_DEVICE);
2238
2239 /* Don't allow device id that exceeds ITS hardware limit */
2240 if (!baser)
2241 return (ilog2(dev_id) < its->device_ids);
2242
2243 return its_alloc_table_entry(baser, dev_id);
2244 }
2245
2246 static bool its_alloc_vpe_table(u32 vpe_id)
2247 {
2248 struct its_node *its;
2249
2250 /*
2251 * Make sure the L2 tables are allocated on *all* v4 ITSs. We
2252 * could try and only do it on ITSs corresponding to devices
2253 * that have interrupts targeted at this VPE, but the
2254 * complexity becomes crazy (and you have tons of memory
2255 * anyway, right?).
2256 */
2257 list_for_each_entry(its, &its_nodes, entry) {
2258 struct its_baser *baser;
2259
2260 if (!its->is_v4)
2261 continue;
2262
2263 baser = its_get_baser(its, GITS_BASER_TYPE_VCPU);
2264 if (!baser)
2265 return false;
2266
2267 if (!its_alloc_table_entry(baser, vpe_id))
2268 return false;
2269 }
2270
2271 return true;
2272 }
2273
2274 static struct its_device *its_create_device(struct its_node *its, u32 dev_id,
2275 int nvecs, bool alloc_lpis)
2276 {
2277 struct its_device *dev;
2278 unsigned long *lpi_map = NULL;
2279 unsigned long flags;
2280 u16 *col_map = NULL;
2281 void *itt;
2282 int lpi_base;
2283 int nr_lpis;
2284 int nr_ites;
2285 int sz;
2286
2287 if (!its_alloc_device_table(its, dev_id))
2288 return NULL;
2289
2290 if (WARN_ON(!is_power_of_2(nvecs)))
2291 nvecs = roundup_pow_of_two(nvecs);
2292
2293 dev = kzalloc(sizeof(*dev), GFP_KERNEL);
2294 /*
2295 * Even if the device wants a single LPI, the ITT must be
2296 * sized as a power of two (and you need at least one bit...).
2297 */
2298 nr_ites = max(2, nvecs);
2299 sz = nr_ites * its->ite_size;
2300 sz = max(sz, ITS_ITT_ALIGN) + ITS_ITT_ALIGN - 1;
2301 itt = kzalloc(sz, GFP_KERNEL);
2302 if (alloc_lpis) {
2303 lpi_map = its_lpi_alloc(nvecs, &lpi_base, &nr_lpis);
2304 if (lpi_map)
2305 col_map = kcalloc(nr_lpis, sizeof(*col_map),
2306 GFP_KERNEL);
2307 } else {
2308 col_map = kcalloc(nr_ites, sizeof(*col_map), GFP_KERNEL);
2309 nr_lpis = 0;
2310 lpi_base = 0;
2311 }
2312
2313 if (!dev || !itt || !col_map || (!lpi_map && alloc_lpis)) {
2314 kfree(dev);
2315 kfree(itt);
2316 kfree(lpi_map);
2317 kfree(col_map);
2318 return NULL;
2319 }
2320
2321 gic_flush_dcache_to_poc(itt, sz);
2322
2323 dev->its = its;
2324 dev->itt = itt;
2325 dev->nr_ites = nr_ites;
2326 dev->event_map.lpi_map = lpi_map;
2327 dev->event_map.col_map = col_map;
2328 dev->event_map.lpi_base = lpi_base;
2329 dev->event_map.nr_lpis = nr_lpis;
2330 mutex_init(&dev->event_map.vlpi_lock);
2331 dev->device_id = dev_id;
2332 INIT_LIST_HEAD(&dev->entry);
2333
2334 raw_spin_lock_irqsave(&its->lock, flags);
2335 list_add(&dev->entry, &its->its_device_list);
2336 raw_spin_unlock_irqrestore(&its->lock, flags);
2337
2338 /* Map device to its ITT */
2339 its_send_mapd(dev, 1);
2340
2341 return dev;
2342 }
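/*
 * ITT sizing example (illustrative, assuming a 4-byte ITE): a device
 * asking for nvecs = 1 still gets nr_ites = 2, so the allocation is
 * sz = max(8, ITS_ITT_ALIGN) + ITS_ITT_ALIGN - 1 bytes, leaving
 * enough slack for the MAPD command to round the ITT address up to
 * ITS_ITT_ALIGN.
 */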
2343
2344 static void its_free_device(struct its_device *its_dev)
2345 {
2346 unsigned long flags;
2347
2348 raw_spin_lock_irqsave(&its_dev->its->lock, flags);
2349 list_del(&its_dev->entry);
2350 raw_spin_unlock_irqrestore(&its_dev->its->lock, flags);
2351 kfree(its_dev->itt);
2352 kfree(its_dev);
2353 }
2354
2355 static int its_alloc_device_irq(struct its_device *dev, int nvecs, irq_hw_number_t *hwirq)
2356 {
2357 int idx;
2358
2359 idx = bitmap_find_free_region(dev->event_map.lpi_map,
2360 dev->event_map.nr_lpis,
2361 get_count_order(nvecs));
2362 if (idx < 0)
2363 return -ENOSPC;
2364
2365 *hwirq = dev->event_map.lpi_base + idx;
2366 set_bit(idx, dev->event_map.lpi_map);
2367
2368 return 0;
2369 }
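/*
 * Allocation example (illustrative): nvecs = 3 gives
 * get_count_order(3) = 2, so a naturally aligned region of 4 events
 * is reserved in the LPI map and *hwirq returns the LPI of event 0.
 */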
2370
2371 static int its_msi_prepare(struct irq_domain *domain, struct device *dev,
2372 int nvec, msi_alloc_info_t *info)
2373 {
2374 struct its_node *its;
2375 struct its_device *its_dev;
2376 struct msi_domain_info *msi_info;
2377 u32 dev_id;
2378 int err = 0;
2379
2380 /*
2381 * We ignore "dev" entirely, and rely on the dev_id that has
2382 * been passed via the scratchpad. This limits this domain's
2383 * usefulness to upper layers that definitely know that they
2384 * are built on top of the ITS.
2385 */
2386 dev_id = info->scratchpad[0].ul;
2387
2388 msi_info = msi_get_domain_info(domain);
2389 its = msi_info->data;
2390
2391 if (!gic_rdists->has_direct_lpi &&
2392 vpe_proxy.dev &&
2393 vpe_proxy.dev->its == its &&
2394 dev_id == vpe_proxy.dev->device_id) {
2395 /* Bad luck. Get yourself a better implementation */
2396 WARN_ONCE(1, "DevId %x clashes with GICv4 VPE proxy device\n",
2397 dev_id);
2398 return -EINVAL;
2399 }
2400
2401 mutex_lock(&its->dev_alloc_lock);
2402 its_dev = its_find_device(its, dev_id);
2403 if (its_dev) {
2404 /*
2405 * We have already seen this ID, probably through
2406 * another alias (PCI bridge of some sort). No need to
2407 * create the device.
2408 */
2409 its_dev->shared = true;
2410 pr_debug("Reusing ITT for devID %x\n", dev_id);
2411 goto out;
2412 }
2413
2414 its_dev = its_create_device(its, dev_id, nvec, true);
2415 if (!its_dev) {
2416 err = -ENOMEM;
2417 goto out;
2418 }
2419
2420 pr_debug("ITT %d entries, %d bits\n", nvec, ilog2(nvec));
2421 out:
2422 mutex_unlock(&its->dev_alloc_lock);
2423 info->scratchpad[0].ptr = its_dev;
2424 return err;
2425 }
2426
2427 static struct msi_domain_ops its_msi_domain_ops = {
2428 .msi_prepare = its_msi_prepare,
2429 };
2430
2431 static int its_irq_gic_domain_alloc(struct irq_domain *domain,
2432 unsigned int virq,
2433 irq_hw_number_t hwirq)
2434 {
2435 struct irq_fwspec fwspec;
2436
2437 if (irq_domain_get_of_node(domain->parent)) {
2438 fwspec.fwnode = domain->parent->fwnode;
2439 fwspec.param_count = 3;
2440 fwspec.param[0] = GIC_IRQ_TYPE_LPI;
2441 fwspec.param[1] = hwirq;
2442 fwspec.param[2] = IRQ_TYPE_EDGE_RISING;
2443 } else if (is_fwnode_irqchip(domain->parent->fwnode)) {
2444 fwspec.fwnode = domain->parent->fwnode;
2445 fwspec.param_count = 2;
2446 fwspec.param[0] = hwirq;
2447 fwspec.param[1] = IRQ_TYPE_EDGE_RISING;
2448 } else {
2449 return -EINVAL;
2450 }
2451
2452 return irq_domain_alloc_irqs_parent(domain, virq, 1, &fwspec);
2453 }
2454
2455 static int its_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
2456 unsigned int nr_irqs, void *args)
2457 {
2458 msi_alloc_info_t *info = args;
2459 struct its_device *its_dev = info->scratchpad[0].ptr;
2460 struct irq_data *irqd;
2461 irq_hw_number_t hwirq;
2462 int err;
2463 int i;
2464
2465 err = its_alloc_device_irq(its_dev, nr_irqs, &hwirq);
2466 if (err)
2467 return err;
2468
2469 for (i = 0; i < nr_irqs; i++) {
2470 err = its_irq_gic_domain_alloc(domain, virq + i, hwirq + i);
2471 if (err)
2472 return err;
2473
2474 irq_domain_set_hwirq_and_chip(domain, virq + i,
2475 hwirq + i, &its_irq_chip, its_dev);
2476 irqd = irq_get_irq_data(virq + i);
2477 irqd_set_single_target(irqd);
2478 irqd_set_affinity_on_activate(irqd);
2479 pr_debug("ID:%d pID:%d vID:%d\n",
2480 (int)(hwirq + i - its_dev->event_map.lpi_base),
2481 (int)(hwirq + i), virq + i);
2482 }
2483
2484 return 0;
2485 }
2486
2487 static int its_irq_domain_activate(struct irq_domain *domain,
2488 struct irq_data *d, bool reserve)
2489 {
2490 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
2491 u32 event = its_get_event_id(d);
2492 const struct cpumask *cpu_mask = cpu_online_mask;
2493 int cpu;
2494
2495 /* get the cpu_mask of local node */
2496 if (its_dev->its->numa_node >= 0)
2497 cpu_mask = cpumask_of_node(its_dev->its->numa_node);
2498
2499 /* Bind the LPI to the first possible CPU */
2500 cpu = cpumask_first_and(cpu_mask, cpu_online_mask);
2501 if (cpu >= nr_cpu_ids) {
2502 if (its_dev->its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144)
2503 return -EINVAL;
2504
2505 cpu = cpumask_first(cpu_online_mask);
2506 }
2507
2508 its_dev->event_map.col_map[event] = cpu;
2509 irq_data_update_effective_affinity(d, cpumask_of(cpu));
2510
2511 /* Map the GIC IRQ and event to the device */
2512 its_send_mapti(its_dev, d->hwirq, event);
2513 return 0;
2514 }
2515
2516 static void its_irq_domain_deactivate(struct irq_domain *domain,
2517 struct irq_data *d)
2518 {
2519 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
2520 u32 event = its_get_event_id(d);
2521
2522 /* Stop the delivery of interrupts */
2523 its_send_discard(its_dev, event);
2524 }
2525
2526 static void its_irq_domain_free(struct irq_domain *domain, unsigned int virq,
2527 unsigned int nr_irqs)
2528 {
2529 struct irq_data *d = irq_domain_get_irq_data(domain, virq);
2530 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
2531 struct its_node *its = its_dev->its;
2532 int i;
2533
2534 bitmap_release_region(its_dev->event_map.lpi_map,
2535 its_get_event_id(irq_domain_get_irq_data(domain, virq)),
2536 get_count_order(nr_irqs));
2537
2538 for (i = 0; i < nr_irqs; i++) {
2539 struct irq_data *data = irq_domain_get_irq_data(domain,
2540 virq + i);
2541 /* Nuke the entry in the domain */
2542 irq_domain_reset_irq_data(data);
2543 }
2544
2545 mutex_lock(&its->dev_alloc_lock);
2546
2547 /*
2548 * If all interrupts have been freed, start mopping the
2549 * floor. This is conditioned on the device not being shared.
2550 */
2551 if (!its_dev->shared &&
2552 bitmap_empty(its_dev->event_map.lpi_map,
2553 its_dev->event_map.nr_lpis)) {
2554 its_lpi_free(its_dev->event_map.lpi_map,
2555 its_dev->event_map.lpi_base,
2556 its_dev->event_map.nr_lpis);
2557 kfree(its_dev->event_map.col_map);
2558
2559 /* Unmap device/itt */
2560 its_send_mapd(its_dev, 0);
2561 its_free_device(its_dev);
2562 }
2563
2564 mutex_unlock(&its->dev_alloc_lock);
2565
2566 irq_domain_free_irqs_parent(domain, virq, nr_irqs);
2567 }
2568
2569 static const struct irq_domain_ops its_domain_ops = {
2570 .alloc = its_irq_domain_alloc,
2571 .free = its_irq_domain_free,
2572 .activate = its_irq_domain_activate,
2573 .deactivate = its_irq_domain_deactivate,
2574 };
2575
2576 /*
2577 * This is insane.
2578 *
2579 * If a GICv4 doesn't implement Direct LPIs (which is extremely
2580 * likely), the only way to perform an invalidate is to use a fake
2581 * device to issue an INV command, implying that the LPI has first
2582 * been mapped to some event on that device. Since this is not exactly
2583 * cheap, we try to keep that mapping around as long as possible, and
2584 * only issue an UNMAP if we're short on available slots.
2585 *
2586 * Broken by design(tm).
2587 */
2588 static void its_vpe_db_proxy_unmap_locked(struct its_vpe *vpe)
2589 {
2590 /* Already unmapped? */
2591 if (vpe->vpe_proxy_event == -1)
2592 return;
2593
2594 its_send_discard(vpe_proxy.dev, vpe->vpe_proxy_event);
2595 vpe_proxy.vpes[vpe->vpe_proxy_event] = NULL;
2596
2597 /*
2598 * We don't track empty slots at all, so let's move the
2599 * next_victim pointer if we can quickly reuse that slot
2600 * instead of nuking an existing entry. Not clear that this is
2601 * always a win though, and this might just generate a ripple
2602 * effect... Let's just hope VPEs don't migrate too often.
2603 */
2604 if (vpe_proxy.vpes[vpe_proxy.next_victim])
2605 vpe_proxy.next_victim = vpe->vpe_proxy_event;
2606
2607 vpe->vpe_proxy_event = -1;
2608 }
2609
2610 static void its_vpe_db_proxy_unmap(struct its_vpe *vpe)
2611 {
2612 if (!gic_rdists->has_direct_lpi) {
2613 unsigned long flags;
2614
2615 raw_spin_lock_irqsave(&vpe_proxy.lock, flags);
2616 its_vpe_db_proxy_unmap_locked(vpe);
2617 raw_spin_unlock_irqrestore(&vpe_proxy.lock, flags);
2618 }
2619 }
2620
2621 static void its_vpe_db_proxy_map_locked(struct its_vpe *vpe)
2622 {
2623 /* Already mapped? */
2624 if (vpe->vpe_proxy_event != -1)
2625 return;
2626
2627 /* This slot was already allocated. Kick the other VPE out. */
2628 if (vpe_proxy.vpes[vpe_proxy.next_victim])
2629 its_vpe_db_proxy_unmap_locked(vpe_proxy.vpes[vpe_proxy.next_victim]);
2630
2631 /* Map the new VPE instead */
2632 vpe_proxy.vpes[vpe_proxy.next_victim] = vpe;
2633 vpe->vpe_proxy_event = vpe_proxy.next_victim;
2634 vpe_proxy.next_victim = (vpe_proxy.next_victim + 1) % vpe_proxy.dev->nr_ites;
2635
2636 vpe_proxy.dev->event_map.col_map[vpe->vpe_proxy_event] = vpe->col_idx;
2637 its_send_mapti(vpe_proxy.dev, vpe->vpe_db_lpi, vpe->vpe_proxy_event);
2638 }
2639
2640 static void its_vpe_db_proxy_move(struct its_vpe *vpe, int from, int to)
2641 {
2642 unsigned long flags;
2643 struct its_collection *target_col;
2644
2645 if (gic_rdists->has_direct_lpi) {
2646 void __iomem *rdbase;
2647
2648 rdbase = per_cpu_ptr(gic_rdists->rdist, from)->rd_base;
2649 gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_CLRLPIR);
2650 while (gic_read_lpir(rdbase + GICR_SYNCR) & 1)
2651 cpu_relax();
2652
2653 return;
2654 }
2655
2656 raw_spin_lock_irqsave(&vpe_proxy.lock, flags);
2657
2658 its_vpe_db_proxy_map_locked(vpe);
2659
2660 target_col = &vpe_proxy.dev->its->collections[to];
2661 its_send_movi(vpe_proxy.dev, target_col, vpe->vpe_proxy_event);
2662 vpe_proxy.dev->event_map.col_map[vpe->vpe_proxy_event] = to;
2663
2664 raw_spin_unlock_irqrestore(&vpe_proxy.lock, flags);
2665 }
2666
2667 static int its_vpe_set_affinity(struct irq_data *d,
2668 const struct cpumask *mask_val,
2669 bool force)
2670 {
2671 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
2672 int cpu = cpumask_first(mask_val);
2673
2674 /*
2675 * Changing affinity is mega expensive, so let's be as lazy as
2676 * we can and only do it if we really have to. Also, if mapped
2677 * into the proxy device, we need to move the doorbell
2678 * interrupt to its new location.
2679 */
2680 if (vpe->col_idx != cpu) {
2681 int from = vpe->col_idx;
2682
2683 vpe->col_idx = cpu;
2684 its_send_vmovp(vpe);
2685 its_vpe_db_proxy_move(vpe, from, cpu);
2686 }
2687
2688 irq_data_update_effective_affinity(d, cpumask_of(cpu));
2689
2690 return IRQ_SET_MASK_OK_DONE;
2691 }
2692
2693 static void its_vpe_schedule(struct its_vpe *vpe)
2694 {
2695 void __iomem *vlpi_base = gic_data_rdist_vlpi_base();
2696 u64 val;
2697
2698 /* Schedule the VPE */
2699 val = virt_to_phys(page_address(vpe->its_vm->vprop_page)) &
2700 GENMASK_ULL(51, 12);
2701 val |= (LPI_NRBITS - 1) & GICR_VPROPBASER_IDBITS_MASK;
2702 val |= GICR_VPROPBASER_RaWb;
2703 val |= GICR_VPROPBASER_InnerShareable;
2704 gits_write_vpropbaser(val, vlpi_base + GICR_VPROPBASER);
2705
2706 val = virt_to_phys(page_address(vpe->vpt_page)) &
2707 GENMASK_ULL(51, 16);
2708 val |= GICR_VPENDBASER_RaWaWb;
2709 val |= GICR_VPENDBASER_NonShareable;
2710 /*
2711 * There is no good way of finding out if the pending table is
2712 * empty as we can race against the doorbell interrupt very
2713 * easily. So in the end, vpe->pending_last is only an
2714 * indication that the vcpu has something pending, not one
2715 * that the pending table is empty. A good implementation
2716 * would be able to read its coarse map pretty quickly anyway,
2717 * making this a tolerable issue.
2718 */
2719 val |= GICR_VPENDBASER_PendingLast;
2720 val |= vpe->idai ? GICR_VPENDBASER_IDAI : 0;
2721 val |= GICR_VPENDBASER_Valid;
2722 gits_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER);
2723 }
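/*
 * Note on the masks above: GICR_VPROPBASER takes a 4kB-aligned PA
 * (hence GENMASK_ULL(51, 12)), while GICR_VPENDBASER requires 64kB
 * alignment (GENMASK_ULL(51, 16)), mirroring the alignment rules of
 * the physical LPI property and pending tables.
 */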
2724
2725 static void its_vpe_deschedule(struct its_vpe *vpe)
2726 {
2727 void __iomem *vlpi_base = gic_data_rdist_vlpi_base();
2728 u64 val;
2729
2730 val = its_clear_vpend_valid(vlpi_base);
2731
2732 if (unlikely(val & GICR_VPENDBASER_Dirty)) {
2733 pr_err_ratelimited("ITS virtual pending table not cleaning\n");
2734 vpe->idai = false;
2735 vpe->pending_last = true;
2736 } else {
2737 vpe->idai = !!(val & GICR_VPENDBASER_IDAI);
2738 vpe->pending_last = !!(val & GICR_VPENDBASER_PendingLast);
2739 }
2740 }
2741
2742 static void its_vpe_invall(struct its_vpe *vpe)
2743 {
2744 struct its_node *its;
2745
2746 list_for_each_entry(its, &its_nodes, entry) {
2747 if (!its->is_v4)
2748 continue;
2749
2750 if (its_list_map && !vpe->its_vm->vlpi_count[its->list_nr])
2751 continue;
2752
2753 /*
2754 * Sending a VINVALL to a single ITS is enough, as all
2755 * we need is to reach the redistributors.
2756 */
2757 its_send_vinvall(its, vpe);
2758 return;
2759 }
2760 }
2761
2762 static int its_vpe_set_vcpu_affinity(struct irq_data *d, void *vcpu_info)
2763 {
2764 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
2765 struct its_cmd_info *info = vcpu_info;
2766
2767 switch (info->cmd_type) {
2768 case SCHEDULE_VPE:
2769 its_vpe_schedule(vpe);
2770 return 0;
2771
2772 case DESCHEDULE_VPE:
2773 its_vpe_deschedule(vpe);
2774 return 0;
2775
2776 case INVALL_VPE:
2777 its_vpe_invall(vpe);
2778 return 0;
2779
2780 default:
2781 return -EINVAL;
2782 }
2783 }
2784
2785 static void its_vpe_send_cmd(struct its_vpe *vpe,
2786 void (*cmd)(struct its_device *, u32))
2787 {
2788 unsigned long flags;
2789
2790 raw_spin_lock_irqsave(&vpe_proxy.lock, flags);
2791
2792 its_vpe_db_proxy_map_locked(vpe);
2793 cmd(vpe_proxy.dev, vpe->vpe_proxy_event);
2794
2795 raw_spin_unlock_irqrestore(&vpe_proxy.lock, flags);
2796 }
2797
2798 static void its_vpe_send_inv(struct irq_data *d)
2799 {
2800 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
2801
2802 if (gic_rdists->has_direct_lpi) {
2803 void __iomem *rdbase;
2804
2805 rdbase = per_cpu_ptr(gic_rdists->rdist, vpe->col_idx)->rd_base;
2806 gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_INVLPIR);
2807 while (gic_read_lpir(rdbase + GICR_SYNCR) & 1)
2808 cpu_relax();
2809 } else {
2810 its_vpe_send_cmd(vpe, its_send_inv);
2811 }
2812 }
2813
2814 static void its_vpe_mask_irq(struct irq_data *d)
2815 {
2816 /*
2817 * We need to mask the LPI, which is described by the parent
2818 * irq_data. Instead of calling into the parent (which won't
2819 * exactly do the right thing), let's simply use the
2820 * parent_data pointer. Yes, I'm naughty.
2821 */
2822 lpi_write_config(d->parent_data, LPI_PROP_ENABLED, 0);
2823 its_vpe_send_inv(d);
2824 }
2825
2826 static void its_vpe_unmask_irq(struct irq_data *d)
2827 {
2828 /* Same hack as above... */
2829 lpi_write_config(d->parent_data, 0, LPI_PROP_ENABLED);
2830 its_vpe_send_inv(d);
2831 }
2832
2833 static int its_vpe_set_irqchip_state(struct irq_data *d,
2834 enum irqchip_irq_state which,
2835 bool state)
2836 {
2837 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
2838
2839 if (which != IRQCHIP_STATE_PENDING)
2840 return -EINVAL;
2841
2842 if (gic_rdists->has_direct_lpi) {
2843 void __iomem *rdbase;
2844
2845 rdbase = per_cpu_ptr(gic_rdists->rdist, vpe->col_idx)->rd_base;
2846 if (state) {
2847 gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_SETLPIR);
2848 } else {
2849 gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_CLRLPIR);
2850 while (gic_read_lpir(rdbase + GICR_SYNCR) & 1)
2851 cpu_relax();
2852 }
2853 } else {
2854 if (state)
2855 its_vpe_send_cmd(vpe, its_send_int);
2856 else
2857 its_vpe_send_cmd(vpe, its_send_clear);
2858 }
2859
2860 return 0;
2861 }
2862
2863 static int its_vpe_retrigger(struct irq_data *d)
2864 {
2865 return !its_vpe_set_irqchip_state(d, IRQCHIP_STATE_PENDING, true);
2866 }
2867
2868 static struct irq_chip its_vpe_irq_chip = {
2869 .name = "GICv4-vpe",
2870 .irq_mask = its_vpe_mask_irq,
2871 .irq_unmask = its_vpe_unmask_irq,
2872 .irq_eoi = irq_chip_eoi_parent,
2873 .irq_set_affinity = its_vpe_set_affinity,
2874 .irq_retrigger = its_vpe_retrigger,
2875 .irq_set_irqchip_state = its_vpe_set_irqchip_state,
2876 .irq_set_vcpu_affinity = its_vpe_set_vcpu_affinity,
2877 };
2878
2879 static int its_vpe_id_alloc(void)
2880 {
2881 return ida_simple_get(&its_vpeid_ida, 0, ITS_MAX_VPEID, GFP_KERNEL);
2882 }
2883
2884 static void its_vpe_id_free(u16 id)
2885 {
2886 ida_simple_remove(&its_vpeid_ida, id);
2887 }
2888
2889 static int its_vpe_init(struct its_vpe *vpe)
2890 {
2891 struct page *vpt_page;
2892 int vpe_id;
2893
2894 /* Allocate vpe_id */
2895 vpe_id = its_vpe_id_alloc();
2896 if (vpe_id < 0)
2897 return vpe_id;
2898
2899 /* Allocate VPT */
2900 vpt_page = its_allocate_pending_table(GFP_KERNEL);
2901 if (!vpt_page) {
2902 its_vpe_id_free(vpe_id);
2903 return -ENOMEM;
2904 }
2905
2906 if (!its_alloc_vpe_table(vpe_id)) {
2907 its_vpe_id_free(vpe_id);
2908 its_free_pending_table(vpt_page);
2909 return -ENOMEM;
2910 }
2911
2912 vpe->vpe_id = vpe_id;
2913 vpe->vpt_page = vpt_page;
2914 vpe->vpe_proxy_event = -1;
2915
2916 return 0;
2917 }
2918
2919 static void its_vpe_teardown(struct its_vpe *vpe)
2920 {
2921 its_vpe_db_proxy_unmap(vpe);
2922 its_vpe_id_free(vpe->vpe_id);
2923 its_free_pending_table(vpe->vpt_page);
2924 }
2925
2926 static void its_vpe_irq_domain_free(struct irq_domain *domain,
2927 unsigned int virq,
2928 unsigned int nr_irqs)
2929 {
2930 struct its_vm *vm = domain->host_data;
2931 int i;
2932
2933 irq_domain_free_irqs_parent(domain, virq, nr_irqs);
2934
2935 for (i = 0; i < nr_irqs; i++) {
2936 struct irq_data *data = irq_domain_get_irq_data(domain,
2937 virq + i);
2938 struct its_vpe *vpe = irq_data_get_irq_chip_data(data);
2939
2940 BUG_ON(vm != vpe->its_vm);
2941
2942 clear_bit(data->hwirq, vm->db_bitmap);
2943 its_vpe_teardown(vpe);
2944 irq_domain_reset_irq_data(data);
2945 }
2946
2947 if (bitmap_empty(vm->db_bitmap, vm->nr_db_lpis)) {
2948 its_lpi_free(vm->db_bitmap, vm->db_lpi_base, vm->nr_db_lpis);
2949 its_free_prop_table(vm->vprop_page);
2950 }
2951 }
2952
2953 static int its_vpe_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
2954 unsigned int nr_irqs, void *args)
2955 {
2956 struct its_vm *vm = args;
2957 unsigned long *bitmap;
2958 struct page *vprop_page;
2959 int base, nr_ids, i, err = 0;
2960
2961 BUG_ON(!vm);
2962
2963 bitmap = its_lpi_alloc(roundup_pow_of_two(nr_irqs), &base, &nr_ids);
2964 if (!bitmap)
2965 return -ENOMEM;
2966
2967 if (nr_ids < nr_irqs) {
2968 its_lpi_free(bitmap, base, nr_ids);
2969 return -ENOMEM;
2970 }
2971
2972 vprop_page = its_allocate_prop_table(GFP_KERNEL);
2973 if (!vprop_page) {
2974 its_lpi_free(bitmap, base, nr_ids);
2975 return -ENOMEM;
2976 }
2977
2978 vm->db_bitmap = bitmap;
2979 vm->db_lpi_base = base;
2980 vm->nr_db_lpis = nr_ids;
2981 vm->vprop_page = vprop_page;
2982
2983 for (i = 0; i < nr_irqs; i++) {
2984 vm->vpes[i]->vpe_db_lpi = base + i;
2985 err = its_vpe_init(vm->vpes[i]);
2986 if (err)
2987 break;
2988 err = its_irq_gic_domain_alloc(domain, virq + i,
2989 vm->vpes[i]->vpe_db_lpi);
2990 if (err)
2991 break;
2992 irq_domain_set_hwirq_and_chip(domain, virq + i, i,
2993 &its_vpe_irq_chip, vm->vpes[i]);
2994 set_bit(i, bitmap);
2995 }
2996
2997 if (err) {
2998 if (i > 0)
2999 its_vpe_irq_domain_free(domain, virq, i);
3000
3001 its_lpi_free(bitmap, base, nr_ids);
3002 its_free_prop_table(vprop_page);
3003 }
3004
3005 return err;
3006 }
3007
3008 static int its_vpe_irq_domain_activate(struct irq_domain *domain,
3009 struct irq_data *d, bool reserve)
3010 {
3011 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
3012 struct its_node *its;
3013
3014 /* If we use the list map, we issue VMAPP on demand... */
3015 if (its_list_map)
3016 return 0;
3017
3018 /* Map the VPE to the first possible CPU */
3019 vpe->col_idx = cpumask_first(cpu_online_mask);
3020
3021 list_for_each_entry(its, &its_nodes, entry) {
3022 if (!its->is_v4)
3023 continue;
3024
3025 its_send_vmapp(its, vpe, true);
3026 its_send_vinvall(its, vpe);
3027 }
3028
3029 irq_data_update_effective_affinity(d, cpumask_of(vpe->col_idx));
3030
3031 return 0;
3032 }
3033
3034 static void its_vpe_irq_domain_deactivate(struct irq_domain *domain,
3035 struct irq_data *d)
3036 {
3037 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
3038 struct its_node *its;
3039
3040 /*
3041 * If we use the list map, we unmap the VPE once no VLPIs are
3042 * associated with the VM.
3043 */
3044 if (its_list_map)
3045 return;
3046
3047 list_for_each_entry(its, &its_nodes, entry) {
3048 if (!its->is_v4)
3049 continue;
3050
3051 its_send_vmapp(its, vpe, false);
3052 }
3053 }
3054
3055 static const struct irq_domain_ops its_vpe_domain_ops = {
3056 .alloc = its_vpe_irq_domain_alloc,
3057 .free = its_vpe_irq_domain_free,
3058 .activate = its_vpe_irq_domain_activate,
3059 .deactivate = its_vpe_irq_domain_deactivate,
3060 };
3061
3062 static int its_force_quiescent(void __iomem *base)
3063 {
3064 u32 count = 1000000; /* 1s */
3065 u32 val;
3066
3067 val = readl_relaxed(base + GITS_CTLR);
3068 /*
3069 * The GIC architecture specification requires the ITS to be both
3070 * disabled and quiescent for writes to GITS_BASER<n> or
3071 * GITS_CBASER to not have UNPREDICTABLE results.
3072 */
3073 if ((val & GITS_CTLR_QUIESCENT) && !(val & GITS_CTLR_ENABLE))
3074 return 0;
3075
3076 /* Disable the generation of all interrupts to this ITS */
3077 val &= ~(GITS_CTLR_ENABLE | GITS_CTLR_ImDe);
3078 writel_relaxed(val, base + GITS_CTLR);
3079
3080 /* Poll GITS_CTLR and wait until ITS becomes quiescent */
3081 while (1) {
3082 val = readl_relaxed(base + GITS_CTLR);
3083 if (val & GITS_CTLR_QUIESCENT)
3084 return 0;
3085
3086 count--;
3087 if (!count)
3088 return -EBUSY;
3089
3090 cpu_relax();
3091 udelay(1);
3092 }
3093 }
3094
3095 static bool __maybe_unused its_enable_quirk_cavium_22375(void *data)
3096 {
3097 struct its_node *its = data;
3098
3099 /* erratum 22375: only alloc 8MB table size */
3100 its->device_ids = 0x14; /* 20 bits, 8MB */
3101 its->flags |= ITS_FLAGS_WORKAROUND_CAVIUM_22375;
3102
3103 return true;
3104 }
3105
3106 static bool __maybe_unused its_enable_quirk_cavium_23144(void *data)
3107 {
3108 struct its_node *its = data;
3109
3110 its->flags |= ITS_FLAGS_WORKAROUND_CAVIUM_23144;
3111
3112 return true;
3113 }
3114
3115 static bool __maybe_unused its_enable_quirk_qdf2400_e0065(void *data)
3116 {
3117 struct its_node *its = data;
3118
3119 /* On QDF2400, the size of the ITE is 16 bytes */
3120 its->ite_size = 16;
3121
3122 return true;
3123 }
3124
3125 static u64 its_irq_get_msi_base_pre_its(struct its_device *its_dev)
3126 {
3127 struct its_node *its = its_dev->its;
3128
3129 /*
3130 * The Socionext Synquacer SoC has a so-called 'pre-ITS',
3131 * which maps 32-bit writes targeted at a separate window of
3132 * size '4 << device_id_bits' onto writes to GITS_TRANSLATER
3133 * with device ID taken from bits [device_id_bits + 1:2] of
3134 * the window offset.
3135 */
3136 return its->pre_its_base + (its_dev->device_id << 2);
3137 }
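/*
 * Address example with a hypothetical pre_its_base of 0x58000000:
 * device ID 5 is told to write its MSIs at 0x58000000 + (5 << 2) =
 * 0x58000014, and the pre-ITS recovers the device ID from the window
 * offset before forwarding the write to GITS_TRANSLATER.
 */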
3138
3139 static bool __maybe_unused its_enable_quirk_socionext_synquacer(void *data)
3140 {
3141 struct its_node *its = data;
3142 u32 pre_its_window[2];
3143 u32 ids;
3144
3145 if (!fwnode_property_read_u32_array(its->fwnode_handle,
3146 "socionext,synquacer-pre-its",
3147 pre_its_window,
3148 ARRAY_SIZE(pre_its_window))) {
3149
3150 its->pre_its_base = pre_its_window[0];
3151 its->get_msi_base = its_irq_get_msi_base_pre_its;
3152
3153 ids = ilog2(pre_its_window[1]) - 2;
3154 if (its->device_ids > ids)
3155 its->device_ids = ids;
3156
3157 /* the pre-ITS breaks isolation, so disable MSI remapping */
3158 its->msi_domain_flags &= ~IRQ_DOMAIN_FLAG_MSI_REMAP;
3159 return true;
3160 }
3161 return false;
3162 }
3163
3164 static bool __maybe_unused its_enable_quirk_hip07_161600802(void *data)
3165 {
3166 struct its_node *its = data;
3167
3168 /*
3169 * Hip07 insists on using the wrong address for the VLPI
3170 * page. Trick it into doing the right thing...
3171 */
3172 its->vlpi_redist_offset = SZ_128K;
3173 return true;
3174 }
3175
3176 static const struct gic_quirk its_quirks[] = {
3177 #ifdef CONFIG_CAVIUM_ERRATUM_22375
3178 {
3179 .desc = "ITS: Cavium errata 22375, 24313",
3180 .iidr = 0xa100034c, /* ThunderX pass 1.x */
3181 .mask = 0xffff0fff,
3182 .init = its_enable_quirk_cavium_22375,
3183 },
3184 #endif
3185 #ifdef CONFIG_CAVIUM_ERRATUM_23144
3186 {
3187 .desc = "ITS: Cavium erratum 23144",
3188 .iidr = 0xa100034c, /* ThunderX pass 1.x */
3189 .mask = 0xffff0fff,
3190 .init = its_enable_quirk_cavium_23144,
3191 },
3192 #endif
3193 #ifdef CONFIG_QCOM_QDF2400_ERRATUM_0065
3194 {
3195 .desc = "ITS: QDF2400 erratum 0065",
3196 .iidr = 0x00001070, /* QDF2400 ITS rev 1.x */
3197 .mask = 0xffffffff,
3198 .init = its_enable_quirk_qdf2400_e0065,
3199 },
3200 #endif
3201 #ifdef CONFIG_SOCIONEXT_SYNQUACER_PREITS
3202 {
3203 /*
3204 * The Socionext Synquacer SoC incorporates ARM's own GIC-500
3205 * implementation, but with a 'pre-ITS' added that requires
3206 * special handling in software.
3207 */
3208 .desc = "ITS: Socionext Synquacer pre-ITS",
3209 .iidr = 0x0001143b,
3210 .mask = 0xffffffff,
3211 .init = its_enable_quirk_socionext_synquacer,
3212 },
3213 #endif
3214 #ifdef CONFIG_HISILICON_ERRATUM_161600802
3215 {
3216 .desc = "ITS: Hip07 erratum 161600802",
3217 .iidr = 0x00000004,
3218 .mask = 0xffffffff,
3219 .init = its_enable_quirk_hip07_161600802,
3220 },
3221 #endif
3222 {
3223 }
3224 };
3225
3226 static void its_enable_quirks(struct its_node *its)
3227 {
3228 u32 iidr = readl_relaxed(its->base + GITS_IIDR);
3229
3230 gic_enable_quirks(iidr, its_quirks, its);
3231 }
3232
3233 static int its_save_disable(void)
3234 {
3235 struct its_node *its;
3236 int err = 0;
3237
3238 raw_spin_lock(&its_lock);
3239 list_for_each_entry(its, &its_nodes, entry) {
3240 void __iomem *base;
3241
3242 base = its->base;
3243 its->ctlr_save = readl_relaxed(base + GITS_CTLR);
3244 err = its_force_quiescent(base);
3245 if (err) {
3246 pr_err("ITS@%pa: failed to quiesce: %d\n",
3247 &its->phys_base, err);
3248 writel_relaxed(its->ctlr_save, base + GITS_CTLR);
3249 goto err;
3250 }
3251
3252 its->cbaser_save = gits_read_cbaser(base + GITS_CBASER);
3253 }
3254
3255 err:
3256 if (err) {
3257 list_for_each_entry_continue_reverse(its, &its_nodes, entry) {
3258 void __iomem *base;
3259
3260 base = its->base;
3261 writel_relaxed(its->ctlr_save, base + GITS_CTLR);
3262 }
3263 }
3264 raw_spin_unlock(&its_lock);
3265
3266 return err;
3267 }
3268
3269 static void its_restore_enable(void)
3270 {
3271 struct its_node *its;
3272 int ret;
3273
3274 raw_spin_lock(&its_lock);
3275 list_for_each_entry(its, &its_nodes, entry) {
3276 void __iomem *base;
3277 int i;
3278
3279 base = its->base;
3280
3281 /*
3282 * Make sure that the ITS is disabled. If it fails to quiesce,
3283 * don't restore it since writing to CBASER or BASER<n>
3284 * registers is undefined according to the GIC v3 ITS
3285 * Specification.
3286 *
3287 * Firmware resuming with the ITS enabled is terminally broken.
3288 */
3289 WARN_ON(readl_relaxed(base + GITS_CTLR) & GITS_CTLR_ENABLE);
3290 ret = its_force_quiescent(base);
3291 if (ret) {
3292 pr_err("ITS@%pa: failed to quiesce on resume: %d\n",
3293 &its->phys_base, ret);
3294 continue;
3295 }
3296
3297 gits_write_cbaser(its->cbaser_save, base + GITS_CBASER);
3298
3299 /*
3300 * Writing CBASER resets CREADR to 0, so make CWRITER and
3301 * cmd_write line up with it.
3302 */
3303 its->cmd_write = its->cmd_base;
3304 gits_write_cwriter(0, base + GITS_CWRITER);
3305
3306 /* Restore GITS_BASER from the value cache. */
3307 for (i = 0; i < GITS_BASER_NR_REGS; i++) {
3308 struct its_baser *baser = &its->tables[i];
3309
3310 if (!(baser->val & GITS_BASER_VALID))
3311 continue;
3312
3313 its_write_baser(its, baser, baser->val);
3314 }
3315 writel_relaxed(its->ctlr_save, base + GITS_CTLR);
3316
3317 /*
3318 * Reinit the collection if it's stored in the ITS. This is
3319 * indicated by the col_id being less than the HCC field.
3320 * (CID < HCC, as specified in the GICv3 documentation).
3321 */
3322 if (its->collections[smp_processor_id()].col_id <
3323 GITS_TYPER_HCC(gic_read_typer(base + GITS_TYPER)))
3324 its_cpu_init_collection(its);
3325 }
3326 raw_spin_unlock(&its_lock);
3327 }
3328
3329 static struct syscore_ops its_syscore_ops = {
3330 .suspend = its_save_disable,
3331 .resume = its_restore_enable,
3332 };
3333
3334 static int its_init_domain(struct fwnode_handle *handle, struct its_node *its)
3335 {
3336 struct irq_domain *inner_domain;
3337 struct msi_domain_info *info;
3338
3339 info = kzalloc(sizeof(*info), GFP_KERNEL);
3340 if (!info)
3341 return -ENOMEM;
3342
3343 inner_domain = irq_domain_create_tree(handle, &its_domain_ops, its);
3344 if (!inner_domain) {
3345 kfree(info);
3346 return -ENOMEM;
3347 }
3348
3349 inner_domain->parent = its_parent;
3350 irq_domain_update_bus_token(inner_domain, DOMAIN_BUS_NEXUS);
3351 inner_domain->flags |= its->msi_domain_flags;
3352 info->ops = &its_msi_domain_ops;
3353 info->data = its;
3354 inner_domain->host_data = info;
3355
3356 return 0;
3357 }
3358
3359 static int its_init_vpe_domain(void)
3360 {
3361 struct its_node *its;
3362 u32 devid;
3363 int entries;
3364
3365 if (gic_rdists->has_direct_lpi) {
3366 pr_info("ITS: Using DirectLPI for VPE invalidation\n");
3367 return 0;
3368 }
3369
3370 /* Any ITS will do, even if not v4 */
3371 its = list_first_entry(&its_nodes, struct its_node, entry);
3372
3373 entries = roundup_pow_of_two(nr_cpu_ids);
3374 vpe_proxy.vpes = kcalloc(entries, sizeof(*vpe_proxy.vpes),
3375 GFP_KERNEL);
3376 if (!vpe_proxy.vpes) {
3377 pr_err("ITS: Can't allocate GICv4 proxy device array\n");
3378 return -ENOMEM;
3379 }
3380
3381 /* Use the last possible DevID */
3382 devid = GENMASK(its->device_ids - 1, 0);
3383 vpe_proxy.dev = its_create_device(its, devid, entries, false);
3384 if (!vpe_proxy.dev) {
3385 kfree(vpe_proxy.vpes);
3386 pr_err("ITS: Can't allocate GICv4 proxy device\n");
3387 return -ENOMEM;
3388 }
3389
3390 BUG_ON(entries > vpe_proxy.dev->nr_ites);
3391
3392 raw_spin_lock_init(&vpe_proxy.lock);
3393 vpe_proxy.next_victim = 0;
3394 pr_info("ITS: Allocated DevID %x as GICv4 proxy device (%d slots)\n",
3395 devid, vpe_proxy.dev->nr_ites);
3396
3397 return 0;
3398 }
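/*
 * Example (illustrative): with its->device_ids = 20, the proxy
 * device claims DevID GENMASK(19, 0) = 0xfffff, the last ID this
 * ITS can translate, which minimises the risk of clashing with a
 * real device.
 */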
3399
3400 static int __init its_compute_its_list_map(struct resource *res,
3401 void __iomem *its_base)
3402 {
3403 int its_number;
3404 u32 ctlr;
3405
3406 /*
3407 * This is assumed to be done early enough that we're
3408 * guaranteed to be single-threaded, hence no
3409 * locking. Should this change, we should address
3410 * this.
3411 */
3412 its_number = find_first_zero_bit(&its_list_map, GICv4_ITS_LIST_MAX);
3413 if (its_number >= GICv4_ITS_LIST_MAX) {
3414 pr_err("ITS@%pa: No ITSList entry available!\n",
3415 &res->start);
3416 return -EINVAL;
3417 }
3418
3419 ctlr = readl_relaxed(its_base + GITS_CTLR);
3420 ctlr &= ~GITS_CTLR_ITS_NUMBER;
3421 ctlr |= its_number << GITS_CTLR_ITS_NUMBER_SHIFT;
3422 writel_relaxed(ctlr, its_base + GITS_CTLR);
3423 ctlr = readl_relaxed(its_base + GITS_CTLR);
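/*
 * Some implementations treat ITSNumber as read-only; if the write
 * didn't stick, adopt whatever number the hardware reports instead.
 */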
3424 if ((ctlr & GITS_CTLR_ITS_NUMBER) != (its_number << GITS_CTLR_ITS_NUMBER_SHIFT)) {
3425 its_number = ctlr & GITS_CTLR_ITS_NUMBER;
3426 its_number >>= GITS_CTLR_ITS_NUMBER_SHIFT;
3427 }
3428
3429 if (test_and_set_bit(its_number, &its_list_map)) {
3430 pr_err("ITS@%pa: Duplicate ITSList entry %d\n",
3431 &res->start, its_number);
3432 return -EINVAL;
3433 }
3434
3435 return its_number;
3436 }
3437
3438 static int __init its_probe_one(struct resource *res,
3439 struct fwnode_handle *handle, int numa_node)
3440 {
3441 struct its_node *its;
3442 void __iomem *its_base;
3443 u32 val, ctlr;
3444 u64 baser, tmp, typer;
3445 int err;
3446
3447 its_base = ioremap(res->start, resource_size(res));
3448 if (!its_base) {
3449 pr_warn("ITS@%pa: Unable to map ITS registers\n", &res->start);
3450 return -ENOMEM;
3451 }
3452
3453 val = readl_relaxed(its_base + GITS_PIDR2) & GIC_PIDR2_ARCH_MASK;
3454 if (val != 0x30 && val != 0x40) {
3455 pr_warn("ITS@%pa: No ITS detected, giving up\n", &res->start);
3456 err = -ENODEV;
3457 goto out_unmap;
3458 }
3459
3460 err = its_force_quiescent(its_base);
3461 if (err) {
3462 pr_warn("ITS@%pa: Failed to quiesce, giving up\n", &res->start);
3463 goto out_unmap;
3464 }
3465
3466 pr_info("ITS %pR\n", res);
3467
3468 its = kzalloc(sizeof(*its), GFP_KERNEL);
3469 if (!its) {
3470 err = -ENOMEM;
3471 goto out_unmap;
3472 }
3473
3474 raw_spin_lock_init(&its->lock);
3475 mutex_init(&its->dev_alloc_lock);
3476 INIT_LIST_HEAD(&its->entry);
3477 INIT_LIST_HEAD(&its->its_device_list);
3478 typer = gic_read_typer(its_base + GITS_TYPER);
3479 its->base = its_base;
3480 its->phys_base = res->start;
3481 its->ite_size = GITS_TYPER_ITT_ENTRY_SIZE(typer);
3482 its->device_ids = GITS_TYPER_DEVBITS(typer);
3483 its->is_v4 = !!(typer & GITS_TYPER_VLPIS);
3484 if (its->is_v4) {
3485 if (!(typer & GITS_TYPER_VMOVP)) {
3486 err = its_compute_its_list_map(res, its_base);
3487 if (err < 0)
3488 goto out_free_its;
3489
3490 its->list_nr = err;
3491
3492 pr_info("ITS@%pa: Using ITS number %d\n",
3493 &res->start, err);
3494 } else {
3495 pr_info("ITS@%pa: Single VMOVP capable\n", &res->start);
3496 }
3497 }
3498
3499 its->numa_node = numa_node;
3500
3501 its->cmd_base = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
3502 get_order(ITS_CMD_QUEUE_SZ));
3503 if (!its->cmd_base) {
3504 err = -ENOMEM;
3505 goto out_free_its;
3506 }
3507 its->cmd_write = its->cmd_base;
3508 its->fwnode_handle = handle;
3509 its->get_msi_base = its_irq_get_msi_base;
3510 its->msi_domain_flags = IRQ_DOMAIN_FLAG_MSI_REMAP;
3511
3512 its_enable_quirks(its);
3513
3514 err = its_alloc_tables(its);
3515 if (err)
3516 goto out_free_cmd;
3517
3518 err = its_alloc_collections(its);
3519 if (err)
3520 goto out_free_tables;
3521
3522 baser = (virt_to_phys(its->cmd_base) |
3523 GITS_CBASER_RaWaWb |
3524 GITS_CBASER_InnerShareable |
3525 (ITS_CMD_QUEUE_SZ / SZ_4K - 1) |
3526 GITS_CBASER_VALID);
3527
3528 gits_write_cbaser(baser, its->base + GITS_CBASER);
3529 tmp = gits_read_cbaser(its->base + GITS_CBASER);
3530
3531 if ((tmp ^ baser) & GITS_CBASER_SHAREABILITY_MASK) {
3532 if (!(tmp & GITS_CBASER_SHAREABILITY_MASK)) {
3533 /*
3534 * The HW reports non-shareable, we must
3535 * remove the cacheability attributes as
3536 * well.
3537 */
3538 baser &= ~(GITS_CBASER_SHAREABILITY_MASK |
3539 GITS_CBASER_CACHEABILITY_MASK);
3540 baser |= GITS_CBASER_nC;
3541 gits_write_cbaser(baser, its->base + GITS_CBASER);
3542 }
3543 pr_info("ITS: using cache flushing for cmd queue\n");
3544 its->flags |= ITS_FLAGS_CMDQ_NEEDS_FLUSHING;
3545 }
3546
3547 gits_write_cwriter(0, its->base + GITS_CWRITER);
3548 ctlr = readl_relaxed(its->base + GITS_CTLR);
3549 ctlr |= GITS_CTLR_ENABLE;
3550 if (its->is_v4)
3551 ctlr |= GITS_CTLR_ImDe;
3552 writel_relaxed(ctlr, its->base + GITS_CTLR);
3553
3554 err = its_init_domain(handle, its);
3555 if (err)
3556 goto out_free_tables;
3557
3558 raw_spin_lock(&its_lock);
3559 list_add(&its->entry, &its_nodes);
3560 raw_spin_unlock(&its_lock);
3561
3562 return 0;
3563
3564 out_free_tables:
3565 its_free_tables(its);
3566 out_free_cmd:
3567 free_pages((unsigned long)its->cmd_base, get_order(ITS_CMD_QUEUE_SZ));
3568 out_free_its:
3569 kfree(its);
3570 out_unmap:
3571 iounmap(its_base);
3572 pr_err("ITS@%pa: failed probing (%d)\n", &res->start, err);
3573 return err;
3574 }
3575
3576 static bool gic_rdists_supports_plpis(void)
3577 {
3578 return !!(gic_read_typer(gic_data_rdist_rd_base() + GICR_TYPER) & GICR_TYPER_PLPIS);
3579 }
3580
3581 static int redist_disable_lpis(void)
3582 {
3583 void __iomem *rbase = gic_data_rdist_rd_base();
3584 u64 timeout = USEC_PER_SEC;
3585 u64 val;
3586
3587 /*
3588 * If coming via a CPU hotplug event, we don't need to disable
3589 * LPIs before trying to re-enable them. They are already
3590 * configured and all is well in the world. Detect this case
3591 * by checking the allocation of the pending table for the
3592 * current CPU.
3593 */
3594 if (gic_data_rdist()->pend_page)
3595 return 0;
3596
3597 if (!gic_rdists_supports_plpis()) {
3598 pr_info("CPU%d: LPIs not supported\n", smp_processor_id());
3599 return -ENXIO;
3600 }
3601
3602 val = readl_relaxed(rbase + GICR_CTLR);
3603 if (!(val & GICR_CTLR_ENABLE_LPIS))
3604 return 0;
3605
3606 pr_warn("CPU%d: Booted with LPIs enabled, memory probably corrupted\n",
3607 smp_processor_id());
3608 add_taint(TAINT_CRAP, LOCKDEP_STILL_OK);
3609
3610 /* Disable LPIs */
3611 val &= ~GICR_CTLR_ENABLE_LPIS;
3612 writel_relaxed(val, rbase + GICR_CTLR);
3613
3614 /* Make sure any change to GICR_CTLR is observable by the GIC */
3615 dsb(sy);
3616
3617 /*
3618 * Software must observe RWP==0 after clearing GICR_CTLR.EnableLPIs
3619 * from 1 to 0 before programming GICR_PEND{PROP}BASER registers.
3620 * Error out if we time out waiting for RWP to clear.
3621 */
3622 while (readl_relaxed(rbase + GICR_CTLR) & GICR_CTLR_RWP) {
3623 if (!timeout) {
3624 pr_err("CPU%d: Timeout while disabling LPIs\n",
3625 smp_processor_id());
3626 return -ETIMEDOUT;
3627 }
3628 udelay(1);
3629 timeout--;
3630 }
3631
3632 /*
3633 * After it has been written to 1, it is IMPLEMENTATION
3634 * DEFINED whether GICR_CTLR.EnableLPI becomes RES1 or can be
3635 * cleared to 0. Error out if clearing the bit failed.
3636 */
3637 if (readl_relaxed(rbase + GICR_CTLR) & GICR_CTLR_ENABLE_LPIS) {
3638 pr_err("CPU%d: Failed to disable LPIs\n", smp_processor_id());
3639 return -EBUSY;
3640 }
3641
3642 return 0;
3643 }
3644
3645 int its_cpu_init(void)
3646 {
3647 if (!list_empty(&its_nodes)) {
3648 int ret;
3649
3650 ret = redist_disable_lpis();
3651 if (ret)
3652 return ret;
3653
3654 its_cpu_init_lpis();
3655 its_cpu_init_collections();
3656 }
3657
3658 return 0;
3659 }
3660
3661 static const struct of_device_id its_device_id[] = {
3662 { .compatible = "arm,gic-v3-its", },
3663 {},
3664 };
3665
3666 static int __init its_of_probe(struct device_node *node)
3667 {
3668 struct device_node *np;
3669 struct resource res;
3670
3671 for (np = of_find_matching_node(node, its_device_id); np;
3672 np = of_find_matching_node(np, its_device_id)) {
3673 if (!of_device_is_available(np))
3674 continue;
3675 if (!of_property_read_bool(np, "msi-controller")) {
3676 pr_warn("%pOF: no msi-controller property, ITS ignored\n",
3677 np);
3678 continue;
3679 }
3680
3681 if (of_address_to_resource(np, 0, &res)) {
3682 pr_warn("%pOF: no regs?\n", np);
3683 continue;
3684 }
3685
3686 its_probe_one(&res, &np->fwnode, of_node_to_nid(np));
3687 }
3688 return 0;
3689 }
3690
3691 #ifdef CONFIG_ACPI
3692
3693 #define ACPI_GICV3_ITS_MEM_SIZE (SZ_128K)
3694
3695 #ifdef CONFIG_ACPI_NUMA
3696 struct its_srat_map {
3697 /* numa node id */
3698 u32 numa_node;
3699 /* GIC ITS ID */
3700 u32 its_id;
3701 };
3702
3703 static struct its_srat_map *its_srat_maps __initdata;
3704 static int its_in_srat __initdata;
3705
3706 static int __init acpi_get_its_numa_node(u32 its_id)
3707 {
3708 int i;
3709
3710 for (i = 0; i < its_in_srat; i++) {
3711 if (its_id == its_srat_maps[i].its_id)
3712 return its_srat_maps[i].numa_node;
3713 }
3714 return NUMA_NO_NODE;
3715 }
3716
3717 static int __init gic_acpi_match_srat_its(struct acpi_subtable_header *header,
3718 const unsigned long end)
3719 {
3720 return 0;
3721 }

static int __init gic_acpi_parse_srat_its(struct acpi_subtable_header *header,
					  const unsigned long end)
{
	int node;
	struct acpi_srat_gic_its_affinity *its_affinity;

	its_affinity = (struct acpi_srat_gic_its_affinity *)header;
	if (!its_affinity)
		return -EINVAL;

	if (its_affinity->header.length < sizeof(*its_affinity)) {
		pr_err("SRAT: Invalid header length %d in ITS affinity\n",
			its_affinity->header.length);
		return -EINVAL;
	}

	node = acpi_map_pxm_to_node(its_affinity->proximity_domain);

	if (node == NUMA_NO_NODE || node >= MAX_NUMNODES) {
		pr_err("SRAT: Invalid NUMA node %d in ITS affinity\n", node);
		return 0;
	}

	its_srat_maps[its_in_srat].numa_node = node;
	its_srat_maps[its_in_srat].its_id = its_affinity->its_id;
	its_in_srat++;
	pr_info("SRAT: PXM %d -> ITS %d -> Node %d\n",
		its_affinity->proximity_domain, its_affinity->its_id, node);

	return 0;
}

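/*
 * Two-pass SRAT parsing: the first pass only counts the GIC ITS
 * affinity entries (using the empty match callback above) so the map
 * array can be allocated; the second pass fills it in. A failed
 * allocation is not fatal, it only costs the NUMA affinity hints.
 */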
static void __init acpi_table_parse_srat_its(void)
{
	int count;

	count = acpi_table_parse_entries(ACPI_SIG_SRAT,
					 sizeof(struct acpi_table_srat),
					 ACPI_SRAT_TYPE_GIC_ITS_AFFINITY,
					 gic_acpi_match_srat_its, 0);
	if (count <= 0)
		return;

	its_srat_maps = kmalloc_array(count, sizeof(struct its_srat_map),
				      GFP_KERNEL);
	if (!its_srat_maps) {
		pr_warn("SRAT: Failed to allocate memory for its_srat_maps!\n");
		return;
	}

	acpi_table_parse_entries(ACPI_SIG_SRAT,
				 sizeof(struct acpi_table_srat),
				 ACPI_SRAT_TYPE_GIC_ITS_AFFINITY,
				 gic_acpi_parse_srat_its, 0);
}

/* free the its_srat_maps after ITS probing */
static void __init acpi_its_srat_maps_free(void)
{
	kfree(its_srat_maps);
}
#else
static void __init acpi_table_parse_srat_its(void) { }
static int __init acpi_get_its_numa_node(u32 its_id) { return NUMA_NO_NODE; }
static void __init acpi_its_srat_maps_free(void) { }
#endif

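/*
 * Each MADT GIC translator entry describes one ITS instance: build a
 * 128K MMIO resource for it, allocate a fwnode token keyed on the ITS
 * base address, register that token with the IORT so devices can later
 * be routed to this ITS's MSI domain, and finally probe the ITS
 * itself. Both tokens are rolled back if the probe fails.
 */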
static int __init gic_acpi_parse_madt_its(struct acpi_subtable_header *header,
					  const unsigned long end)
{
	struct acpi_madt_generic_translator *its_entry;
	struct fwnode_handle *dom_handle;
	struct resource res;
	int err;

	its_entry = (struct acpi_madt_generic_translator *)header;
	memset(&res, 0, sizeof(res));
	res.start = its_entry->base_address;
	res.end = its_entry->base_address + ACPI_GICV3_ITS_MEM_SIZE - 1;
	res.flags = IORESOURCE_MEM;

	dom_handle = irq_domain_alloc_fwnode((void *)its_entry->base_address);
	if (!dom_handle) {
		pr_err("ITS@%pa: Unable to allocate GICv3 ITS domain token\n",
		       &res.start);
		return -ENOMEM;
	}

	err = iort_register_domain_token(its_entry->translation_id, res.start,
					 dom_handle);
	if (err) {
		pr_err("ITS@%pa: Unable to register GICv3 ITS domain token (ITS ID %d) to IORT\n",
		       &res.start, its_entry->translation_id);
		goto dom_err;
	}

	err = its_probe_one(&res, dom_handle,
			    acpi_get_its_numa_node(its_entry->translation_id));
	if (!err)
		return 0;

	iort_deregister_domain_token(its_entry->translation_id);
dom_err:
	irq_domain_free_fwnode(dom_handle);
	return err;
}

static void __init its_acpi_probe(void)
{
	acpi_table_parse_srat_its();
	acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_TRANSLATOR,
			      gic_acpi_parse_madt_its, 0);
	acpi_its_srat_maps_free();
}
#else
static void __init its_acpi_probe(void) { }
#endif

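/*
 * Top-level ITS initialisation, called once by the GICv3 core driver:
 * discover the ITSes through DT or ACPI, allocate the LPI tables,
 * enable GICv4 vLPI support when at least one ITS is v4-capable and
 * the redistributors advertise vLPIs, and register syscore ops for
 * suspend/resume.
 */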
int __init its_init(struct fwnode_handle *handle, struct rdists *rdists,
		    struct irq_domain *parent_domain)
{
	struct device_node *of_node;
	struct its_node *its;
	bool has_v4 = false;
	int err;

	its_parent = parent_domain;
	of_node = to_of_node(handle);
	if (of_node)
		its_of_probe(of_node);
	else
		its_acpi_probe();

	if (list_empty(&its_nodes)) {
		pr_warn("ITS: No ITS available, not enabling LPIs\n");
		return -ENXIO;
	}

	gic_rdists = rdists;
	err = its_alloc_lpi_tables();
	if (err)
		return err;

	list_for_each_entry(its, &its_nodes, entry)
		has_v4 |= its->is_v4;

	if (has_v4 && rdists->has_vlpis) {
		if (its_init_vpe_domain() ||
		    its_init_v4(parent_domain, &its_vpe_domain_ops)) {
			rdists->has_vlpis = false;
			pr_err("ITS: Disabling GICv4 support\n");
		}
	}

	register_syscore_ops(&its_syscore_ops);

	return 0;
}
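
/*
 * A minimal sketch of the call site, assuming a caller shaped like the
 * GICv3 core driver (gic_dist_supports_lpis(), gic_data and handle
 * below are illustrative, not defined in this file):
 *
 *	if (gic_dist_supports_lpis())
 *		its_init(handle, &gic_data.rdists, gic_data.domain);
 *
 * The return value may legitimately be ignored: a system without a
 * usable ITS simply runs without LPI/MSI support, while SPIs and PPIs
 * keep working through the parent domain.
 */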