/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright © 2006-2015, Intel Corporation.
 *
 * Authors: Ashok Raj <ashok.raj@intel.com>
 *          Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 *          David Woodhouse <David.Woodhouse@intel.com>
 */

#ifndef _INTEL_IOMMU_H_
#define _INTEL_IOMMU_H_

#include <linux/types.h>
#include <linux/iova.h>
#include <linux/idr.h>
#include <linux/mmu_notifier.h>
#include <linux/list.h>
#include <linux/iommu.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/dmar.h>
#include <linux/bitfield.h>
#include <linux/xarray.h>
#include <linux/perf_event.h>

#include <asm/cacheflush.h>
#include <asm/iommu.h>
#include <uapi/linux/iommufd.h>

/*
 * VT-d hardware uses 4KiB page size regardless of host page size.
 */
#define VTD_PAGE_SHIFT		(12)
#define VTD_PAGE_SIZE		(1UL << VTD_PAGE_SHIFT)
#define VTD_PAGE_MASK		(((u64)-1) << VTD_PAGE_SHIFT)
#define VTD_PAGE_ALIGN(addr)	(((addr) + VTD_PAGE_SIZE - 1) & VTD_PAGE_MASK)
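
/*
 * Worked example of the alignment arithmetic above (illustrative only,
 * not part of the interface). With VTD_PAGE_SIZE == 0x1000:
 *
 *	VTD_PAGE_ALIGN(0x12345) == (0x12345 + 0xfff) & ~0xfffULL
 *				== 0x13344 & ~0xfffULL
 *				== 0x13000
 *
 * i.e. addresses round up to the next 4KiB boundary, independent of the
 * host PAGE_SIZE.
 */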
#define VTD_STRIDE_SHIFT	(9)
#define VTD_STRIDE_MASK		(((u64)-1) << VTD_STRIDE_SHIFT)

#define DMA_PTE_READ		BIT_ULL(0)
#define DMA_PTE_WRITE		BIT_ULL(1)
#define DMA_PTE_LARGE_PAGE	BIT_ULL(7)
#define DMA_PTE_SNP		BIT_ULL(11)

#define DMA_FL_PTE_PRESENT	BIT_ULL(0)
#define DMA_FL_PTE_US		BIT_ULL(2)
#define DMA_FL_PTE_ACCESS	BIT_ULL(5)
#define DMA_FL_PTE_DIRTY	BIT_ULL(6)
#define DMA_FL_PTE_XD		BIT_ULL(63)

#define DMA_SL_PTE_DIRTY_BIT	9
#define DMA_SL_PTE_DIRTY	BIT_ULL(DMA_SL_PTE_DIRTY_BIT)

#define ADDR_WIDTH_5LEVEL	(57)
#define ADDR_WIDTH_4LEVEL	(48)

#define CONTEXT_TT_MULTI_LEVEL	0
#define CONTEXT_TT_DEV_IOTLB	1
#define CONTEXT_TT_PASS_THROUGH	2
#define CONTEXT_PASIDE		BIT_ULL(3)

/*
 * Intel IOMMU register specification per version 1.0 public spec.
 */
#define	DMAR_VER_REG	0x0	/* Arch version supported by this IOMMU */
#define	DMAR_CAP_REG	0x8	/* Hardware supported capabilities */
#define	DMAR_ECAP_REG	0x10	/* Extended capabilities supported */
#define	DMAR_GCMD_REG	0x18	/* Global command register */
#define	DMAR_GSTS_REG	0x1c	/* Global status register */
#define	DMAR_RTADDR_REG	0x20	/* Root entry table */
#define	DMAR_CCMD_REG	0x28	/* Context command reg */
#define	DMAR_FSTS_REG	0x34	/* Fault Status register */
#define	DMAR_FECTL_REG	0x38	/* Fault control register */
#define	DMAR_FEDATA_REG	0x3c	/* Fault event interrupt data register */
#define	DMAR_FEADDR_REG	0x40	/* Fault event interrupt addr register */
#define	DMAR_FEUADDR_REG 0x44	/* Upper address register */
#define	DMAR_AFLOG_REG	0x58	/* Advanced Fault control */
#define	DMAR_PMEN_REG	0x64	/* Enable Protected Memory Region */
#define	DMAR_PLMBASE_REG 0x68	/* PMRR Low addr */
#define	DMAR_PLMLIMIT_REG 0x6c	/* PMRR low limit */
#define	DMAR_PHMBASE_REG 0x70	/* PMRR high base addr */
#define	DMAR_PHMLIMIT_REG 0x78	/* PMRR high limit */
#define	DMAR_IQH_REG	0x80	/* Invalidation queue head register */
#define	DMAR_IQT_REG	0x88	/* Invalidation queue tail register */
#define	DMAR_IQ_SHIFT	4	/* Invalidation queue head/tail shift */
#define	DMAR_IQA_REG	0x90	/* Invalidation queue addr register */
#define	DMAR_ICS_REG	0x9c	/* Invalidation complete status register */
#define	DMAR_IQER_REG	0xb0	/* Invalidation queue error record register */
#define	DMAR_IRTA_REG	0xb8	/* Interrupt remapping table addr register */
#define	DMAR_PQH_REG	0xc0	/* Page request queue head register */
#define	DMAR_PQT_REG	0xc8	/* Page request queue tail register */
#define	DMAR_PQA_REG	0xd0	/* Page request queue address register */
#define	DMAR_PRS_REG	0xdc	/* Page request status register */
#define	DMAR_PECTL_REG	0xe0	/* Page request event control register */
#define	DMAR_PEDATA_REG	0xe4	/* Page request event interrupt data register */
#define	DMAR_PEADDR_REG	0xe8	/* Page request event interrupt addr register */
#define	DMAR_PEUADDR_REG 0xec	/* Page request event Upper address register */
#define	DMAR_MTRRCAP_REG 0x100	/* MTRR capability register */
#define	DMAR_MTRRDEF_REG 0x108	/* MTRR default type register */
#define	DMAR_MTRR_FIX64K_00000_REG 0x120 /* MTRR Fixed range registers */
#define	DMAR_MTRR_FIX16K_80000_REG 0x128
#define	DMAR_MTRR_FIX16K_A0000_REG 0x130
#define	DMAR_MTRR_FIX4K_C0000_REG 0x138
#define	DMAR_MTRR_FIX4K_C8000_REG 0x140
#define	DMAR_MTRR_FIX4K_D0000_REG 0x148
#define	DMAR_MTRR_FIX4K_D8000_REG 0x150
#define	DMAR_MTRR_FIX4K_E0000_REG 0x158
#define	DMAR_MTRR_FIX4K_E8000_REG 0x160
#define	DMAR_MTRR_FIX4K_F0000_REG 0x168
#define	DMAR_MTRR_FIX4K_F8000_REG 0x170
#define	DMAR_MTRR_PHYSBASE0_REG 0x180 /* MTRR Variable range registers */
#define	DMAR_MTRR_PHYSMASK0_REG 0x188
#define	DMAR_MTRR_PHYSBASE1_REG 0x190
#define	DMAR_MTRR_PHYSMASK1_REG 0x198
#define	DMAR_MTRR_PHYSBASE2_REG 0x1a0
#define	DMAR_MTRR_PHYSMASK2_REG 0x1a8
#define	DMAR_MTRR_PHYSBASE3_REG 0x1b0
#define	DMAR_MTRR_PHYSMASK3_REG 0x1b8
#define	DMAR_MTRR_PHYSBASE4_REG 0x1c0
#define	DMAR_MTRR_PHYSMASK4_REG 0x1c8
#define	DMAR_MTRR_PHYSBASE5_REG 0x1d0
#define	DMAR_MTRR_PHYSMASK5_REG 0x1d8
#define	DMAR_MTRR_PHYSBASE6_REG 0x1e0
#define	DMAR_MTRR_PHYSMASK6_REG 0x1e8
#define	DMAR_MTRR_PHYSBASE7_REG 0x1f0
#define	DMAR_MTRR_PHYSMASK7_REG 0x1f8
#define	DMAR_MTRR_PHYSBASE8_REG 0x200
#define	DMAR_MTRR_PHYSMASK8_REG 0x208
#define	DMAR_MTRR_PHYSBASE9_REG 0x210
#define	DMAR_MTRR_PHYSMASK9_REG 0x218
#define	DMAR_PERFCAP_REG	0x300
#define	DMAR_PERFCFGOFF_REG	0x310
#define	DMAR_PERFOVFOFF_REG	0x318
#define	DMAR_PERFCNTROFF_REG	0x31c
#define	DMAR_PERFINTRSTS_REG	0x324
#define	DMAR_PERFINTRCTL_REG	0x328
#define	DMAR_PERFEVNTCAP_REG	0x380
#define	DMAR_ECMD_REG		0x400
#define	DMAR_ECEO_REG		0x408
#define	DMAR_ECRSP_REG		0x410
#define	DMAR_ECCAP_REG		0x430

#define DMAR_IQER_REG_IQEI(reg)		FIELD_GET(GENMASK_ULL(3, 0), reg)
#define DMAR_IQER_REG_ITESID(reg)	FIELD_GET(GENMASK_ULL(47, 32), reg)
#define DMAR_IQER_REG_ICESID(reg)	FIELD_GET(GENMASK_ULL(63, 48), reg)

#define OFFSET_STRIDE		(9)

#define dmar_readq(a) readq(a)
#define dmar_writeq(a,v) writeq(v,a)
#define dmar_readl(a) readl(a)
#define dmar_writel(a, v) writel(v, a)

#define DMAR_VER_MAJOR(v)		(((v) & 0xf0) >> 4)
#define DMAR_VER_MINOR(v)		((v) & 0x0f)
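
/*
 * Illustrative sketch (not part of this header's interface): reading and
 * decoding the architecture version, assuming a live `iommu` with a
 * mapped register window.
 *
 *	u32 ver = dmar_readl(iommu->reg + DMAR_VER_REG);
 *	pr_info("VT-d %u.%u\n", DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver));
 */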
/*
 * Decoding Capability Register
 */
#define cap_esrtps(c)		(((c) >> 63) & 1)
#define cap_esirtps(c)		(((c) >> 62) & 1)
#define cap_ecmds(c)		(((c) >> 61) & 1)
#define cap_fl5lp_support(c)	(((c) >> 60) & 1)
#define cap_pi_support(c)	(((c) >> 59) & 1)
#define cap_fl1gp_support(c)	(((c) >> 56) & 1)
#define cap_read_drain(c)	(((c) >> 55) & 1)
#define cap_write_drain(c)	(((c) >> 54) & 1)
#define cap_max_amask_val(c)	(((c) >> 48) & 0x3f)
#define cap_num_fault_regs(c)	((((c) >> 40) & 0xff) + 1)
#define cap_pgsel_inv(c)	(((c) >> 39) & 1)

#define cap_super_page_val(c)	(((c) >> 34) & 0xf)
#define cap_super_offset(c)	(((find_first_bit(&cap_super_page_val(c), 4)) \
					* OFFSET_STRIDE) + 21)

#define cap_fault_reg_offset(c)	((((c) >> 24) & 0x3ff) * 16)
#define cap_max_fault_reg_offset(c) \
	(cap_fault_reg_offset(c) + cap_num_fault_regs(c) * 16)

#define cap_zlr(c)		(((c) >> 22) & 1)
#define cap_isoch(c)		(((c) >> 23) & 1)
#define cap_mgaw(c)		((((c) >> 16) & 0x3f) + 1)
#define cap_sagaw(c)		(((c) >> 8) & 0x1f)
#define cap_caching_mode(c)	(((c) >> 7) & 1)
#define cap_phmr(c)		(((c) >> 6) & 1)
#define cap_plmr(c)		(((c) >> 5) & 1)
#define cap_rwbf(c)		(((c) >> 4) & 1)
#define cap_afl(c)		(((c) >> 3) & 1)
#define cap_ndoms(c)		(((unsigned long)1) << (4 + 2 * ((c) & 0x7)))
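
/*
 * Illustrative sketch: decoding a few capability fields. SAGAW is a
 * bitmap (bit 1 = 3-level/39-bit, bit 2 = 4-level/48-bit, bit 3 =
 * 5-level/57-bit second-stage tables); NDOMS encodes 2^(4+2n) domain IDs.
 * Assumes a live `iommu`.
 *
 *	u64 cap = dmar_readq(iommu->reg + DMAR_CAP_REG);
 *
 *	if (cap_sagaw(cap) & BIT(2))
 *		...			4-level second-stage paging usable
 *	nr_doms = cap_ndoms(cap);	number of supported domain IDs
 */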
/*
 * Extended Capability Register
 */
#define ecap_pms(e)		(((e) >> 51) & 0x1)
#define ecap_rps(e)		(((e) >> 49) & 0x1)
#define ecap_smpwc(e)		(((e) >> 48) & 0x1)
#define ecap_flts(e)		(((e) >> 47) & 0x1)
#define ecap_slts(e)		(((e) >> 46) & 0x1)
#define ecap_slads(e)		(((e) >> 45) & 0x1)
#define ecap_smts(e)		(((e) >> 43) & 0x1)
#define ecap_dit(e)		(((e) >> 41) & 0x1)
#define ecap_pds(e)		(((e) >> 42) & 0x1)
#define ecap_pasid(e)		(((e) >> 40) & 0x1)
#define ecap_pss(e)		(((e) >> 35) & 0x1f)
#define ecap_eafs(e)		(((e) >> 34) & 0x1)
#define ecap_nwfs(e)		(((e) >> 33) & 0x1)
#define ecap_srs(e)		(((e) >> 31) & 0x1)
#define ecap_ers(e)		(((e) >> 30) & 0x1)
#define ecap_prs(e)		(((e) >> 29) & 0x1)
#define ecap_broken_pasid(e)	(((e) >> 28) & 0x1)
#define ecap_dis(e)		(((e) >> 27) & 0x1)
#define ecap_nest(e)		(((e) >> 26) & 0x1)
#define ecap_mts(e)		(((e) >> 25) & 0x1)
#define ecap_iotlb_offset(e)	((((e) >> 8) & 0x3ff) * 16)
#define ecap_max_iotlb_offset(e) (ecap_iotlb_offset(e) + 16)
#define ecap_coherent(e)	((e) & 0x1)
#define ecap_qis(e)		((e) & 0x2)
#define ecap_pass_through(e)	(((e) >> 6) & 0x1)
#define ecap_eim_support(e)	(((e) >> 4) & 0x1)
#define ecap_ir_support(e)	(((e) >> 3) & 0x1)
#define ecap_dev_iotlb_support(e)	(((e) >> 2) & 0x1)
#define ecap_max_handle_mask(e)	(((e) >> 20) & 0xf)
#define ecap_sc_support(e)	(((e) >> 7) & 0x1) /* Snooping Control */
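
/*
 * Illustrative sketch: the scalable-mode checks used throughout the
 * driver come down to these extended-capability bits. Assumes a live
 * `iommu`.
 *
 *	u64 ecap = dmar_readq(iommu->reg + DMAR_ECAP_REG);
 *
 *	if (ecap_smts(ecap) && ecap_flts(ecap))
 *		...	scalable mode with first-stage translation
 */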
/*
 * Decoding Perf Capability Register
 */
#define pcap_num_cntr(p)	((p) & 0xffff)
#define pcap_cntr_width(p)	(((p) >> 16) & 0x7f)
#define pcap_num_event_group(p)	(((p) >> 24) & 0x1f)
#define pcap_filters_mask(p)	(((p) >> 32) & 0x1f)
#define pcap_interrupt(p)	(((p) >> 50) & 0x1)
/* The counter stride is calculated as 2 ^ (x+10) bytes */
#define pcap_cntr_stride(p)	(1ULL << ((((p) >> 52) & 0x7) + 10))

/*
 * Decoding Perf Event Capability Register
 */
#define pecap_es(p)		((p) & 0xfffffff)

/* Virtual command interface capability */
#define vccap_pasid(v)		(((v) & DMA_VCS_PAS)) /* PASID allocation */

/* IOTLB_REG */
#define DMA_TLB_FLUSH_GRANU_OFFSET  60
#define DMA_TLB_GLOBAL_FLUSH (((u64)1) << 60)
#define DMA_TLB_DSI_FLUSH (((u64)2) << 60)
#define DMA_TLB_PSI_FLUSH (((u64)3) << 60)
#define DMA_TLB_IIRG(type) ((type >> 60) & 3)
#define DMA_TLB_IAIG(val) (((val) >> 57) & 3)
#define DMA_TLB_READ_DRAIN (((u64)1) << 49)
#define DMA_TLB_WRITE_DRAIN (((u64)1) << 48)
#define DMA_TLB_DID(id)	(((u64)((id) & 0xffff)) << 32)
#define DMA_TLB_IVT (((u64)1) << 63)
#define DMA_TLB_IH_NONLEAF (((u64)1) << 6)
#define DMA_TLB_MAX_SIZE (0x3f)

/* INVALID_DESC */
#define DMA_CCMD_INVL_GRANU_OFFSET  61
#define DMA_ID_TLB_GLOBAL_FLUSH	(((u64)1) << 4)
#define DMA_ID_TLB_DSI_FLUSH	(((u64)2) << 4)
#define DMA_ID_TLB_PSI_FLUSH	(((u64)3) << 4)
#define DMA_ID_TLB_READ_DRAIN	(((u64)1) << 7)
#define DMA_ID_TLB_WRITE_DRAIN	(((u64)1) << 6)
#define DMA_ID_TLB_DID(id)	(((u64)((id & 0xffff) << 16)))
#define DMA_ID_TLB_IH_NONLEAF	(((u64)1) << 6)
#define DMA_ID_TLB_ADDR(addr)	(addr)
#define DMA_ID_TLB_ADDR_MASK(mask)	(mask)

/* PMEN_REG */
#define DMA_PMEN_EPM (((u32)1)<<31)
#define DMA_PMEN_PRS (((u32)1)<<0)

/* GCMD_REG */
#define DMA_GCMD_TE (((u32)1) << 31)
#define DMA_GCMD_SRTP (((u32)1) << 30)
#define DMA_GCMD_SFL (((u32)1) << 29)
#define DMA_GCMD_EAFL (((u32)1) << 28)
#define DMA_GCMD_WBF (((u32)1) << 27)
#define DMA_GCMD_QIE (((u32)1) << 26)
#define DMA_GCMD_SIRTP (((u32)1) << 24)
#define DMA_GCMD_IRE (((u32) 1) << 25)
#define DMA_GCMD_CFI (((u32) 1) << 23)

/* GSTS_REG */
#define DMA_GSTS_TES (((u32)1) << 31)
#define DMA_GSTS_RTPS (((u32)1) << 30)
#define DMA_GSTS_FLS (((u32)1) << 29)
#define DMA_GSTS_AFLS (((u32)1) << 28)
#define DMA_GSTS_WBFS (((u32)1) << 27)
#define DMA_GSTS_QIES (((u32)1) << 26)
#define DMA_GSTS_IRTPS (((u32)1) << 24)
#define DMA_GSTS_IRES (((u32)1) << 25)
#define DMA_GSTS_CFIS (((u32)1) << 23)

/* DMA_RTADDR_REG */
#define DMA_RTADDR_SMT (((u64)1) << 10)

/* CCMD_REG */
#define DMA_CCMD_ICC (((u64)1) << 63)
#define DMA_CCMD_GLOBAL_INVL (((u64)1) << 61)
#define DMA_CCMD_DOMAIN_INVL (((u64)2) << 61)
#define DMA_CCMD_DEVICE_INVL (((u64)3) << 61)
#define DMA_CCMD_FM(m) (((u64)((m) & 0x3)) << 32)
#define DMA_CCMD_MASK_NOBIT 0
#define DMA_CCMD_MASK_1BIT 1
#define DMA_CCMD_MASK_2BIT 2
#define DMA_CCMD_MASK_3BIT 3
#define DMA_CCMD_SID(s) (((u64)((s) & 0xffff)) << 16)
#define DMA_CCMD_DID(d) ((u64)((d) & 0xffff))
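
/*
 * Illustrative sketch of a register-based global context-cache flush,
 * showing how these fields compose (the driver's actual helper lives
 * elsewhere; assumes a live `iommu`):
 *
 *	u64 val = DMA_CCMD_ICC | DMA_CCMD_GLOBAL_INVL;
 *
 *	dmar_writeq(iommu->reg + DMAR_CCMD_REG, val);
 *	... then poll DMAR_CCMD_REG until the ICC bit reads back as 0 ...
 */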
/* ECMD_REG */
#define DMA_MAX_NUM_ECMD		256
#define DMA_MAX_NUM_ECMDCAP		(DMA_MAX_NUM_ECMD / 64)
#define DMA_ECMD_REG_STEP		8
#define DMA_ECMD_ENABLE			0xf0
#define DMA_ECMD_DISABLE		0xf1
#define DMA_ECMD_FREEZE			0xf4
#define DMA_ECMD_UNFREEZE		0xf5
#define DMA_ECMD_OA_SHIFT		16
#define DMA_ECMD_ECRSP_IP		0x1
#define DMA_ECMD_ECCAP3			3
#define DMA_ECMD_ECCAP3_ECNTS		BIT_ULL(48)
#define DMA_ECMD_ECCAP3_DCNTS		BIT_ULL(49)
#define DMA_ECMD_ECCAP3_FCNTS		BIT_ULL(52)
#define DMA_ECMD_ECCAP3_UFCNTS		BIT_ULL(53)
#define DMA_ECMD_ECCAP3_ESSENTIAL	(DMA_ECMD_ECCAP3_ECNTS |	\
					 DMA_ECMD_ECCAP3_DCNTS |	\
					 DMA_ECMD_ECCAP3_FCNTS |	\
					 DMA_ECMD_ECCAP3_UFCNTS)

/* FECTL_REG */
#define DMA_FECTL_IM (((u32)1) << 31)

/* FSTS_REG */
#define DMA_FSTS_PFO (1 << 0) /* Primary Fault Overflow */
#define DMA_FSTS_PPF (1 << 1) /* Primary Pending Fault */
#define DMA_FSTS_IQE (1 << 4) /* Invalidation Queue Error */
#define DMA_FSTS_ICE (1 << 5) /* Invalidation Completion Error */
#define DMA_FSTS_ITE (1 << 6) /* Invalidation Time-out Error */
#define DMA_FSTS_PRO (1 << 7) /* Page Request Overflow */
#define dma_fsts_fault_record_index(s) (((s) >> 8) & 0xff)
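
/*
 * Illustrative sketch of the fault-handler pattern these bits imply:
 * check the summary flag, then locate the first pending fault record
 * (each record is 16 bytes, starting at cap_fault_reg_offset()).
 * Assumes a live `iommu` and a local `reg`.
 *
 *	u32 fsts = readl(iommu->reg + DMAR_FSTS_REG);
 *
 *	if (fsts & DMA_FSTS_PPF)
 *		reg = cap_fault_reg_offset(iommu->cap) +
 *		      dma_fsts_fault_record_index(fsts) * 16;
 */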
/* FRCD_REG, 32 bits access */
#define DMA_FRCD_F (((u32)1) << 31)
#define dma_frcd_type(d) ((d >> 30) & 1)
#define dma_frcd_fault_reason(c) (c & 0xff)
#define dma_frcd_source_id(c) (c & 0xffff)
#define dma_frcd_pasid_value(c) (((c) >> 8) & 0xfffff)
#define dma_frcd_pasid_present(c) (((c) >> 31) & 1)
/* low 64 bit */
#define dma_frcd_page_addr(d) (d & (((u64)-1) << PAGE_SHIFT))

/* PRS_REG */
#define DMA_PRS_PPR	((u32)1)
#define DMA_PRS_PRO	((u32)2)

#define DMA_VCS_PAS	((u64)1)

/* PERFINTRSTS_REG */
#define DMA_PERFINTRSTS_PIS	((u32)1)

#define IOMMU_WAIT_OP(iommu, offset, op, cond, sts)			\
do {									\
	cycles_t start_time = get_cycles();				\
	while (1) {							\
		sts = op(iommu->reg + offset);				\
		if (cond)						\
			break;						\
		if (DMAR_OPERATION_TIMEOUT < (get_cycles() - start_time))\
			panic("DMAR hardware is malfunctioning\n");	\
		cpu_relax();						\
	}								\
} while (0)
#define QI_LENGTH	256	/* queue length */

enum {
	QI_FREE,
	QI_IN_USE,
	QI_DONE,
	QI_ABORT
};

#define QI_CC_TYPE		0x1
#define QI_IOTLB_TYPE		0x2
#define QI_DIOTLB_TYPE		0x3
#define QI_IEC_TYPE		0x4
#define QI_IWD_TYPE		0x5
#define QI_EIOTLB_TYPE		0x6
#define QI_PC_TYPE		0x7
#define QI_DEIOTLB_TYPE		0x8
#define QI_PGRP_RESP_TYPE	0x9
#define QI_PSTRM_RESP_TYPE	0xa

#define QI_IEC_SELECTIVE	(((u64)1) << 4)
#define QI_IEC_IIDEX(idx)	(((u64)(idx & 0xffff) << 32))
#define QI_IEC_IM(m)		(((u64)(m & 0x1f) << 27))

#define QI_IWD_STATUS_DATA(d)	(((u64)d) << 32)
#define QI_IWD_STATUS_WRITE	(((u64)1) << 5)
#define QI_IWD_FENCE		(((u64)1) << 6)
#define QI_IWD_PRQ_DRAIN	(((u64)1) << 7)

#define QI_IOTLB_DID(did)	(((u64)did) << 16)
#define QI_IOTLB_DR(dr)		(((u64)dr) << 7)
#define QI_IOTLB_DW(dw)		(((u64)dw) << 6)
#define QI_IOTLB_GRAN(gran)	(((u64)gran) >> (DMA_TLB_FLUSH_GRANU_OFFSET-4))
#define QI_IOTLB_ADDR(addr)	(((u64)addr) & VTD_PAGE_MASK)
#define QI_IOTLB_IH(ih)		(((u64)ih) << 6)
#define QI_IOTLB_AM(am)		(((u8)am) & 0x3f)
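
/*
 * Illustrative sketch: composing a domain-selective IOTLB invalidation
 * descriptor from the helpers above, roughly as qi_flush_iotlb() does
 * internally. `did`, `addr`, `size_order` and `desc` are hypothetical
 * locals.
 *
 *	desc.qw0 = QI_IOTLB_DID(did) | QI_IOTLB_GRAN(DMA_TLB_DSI_FLUSH) |
 *		   QI_IOTLB_DR(1) | QI_IOTLB_DW(1) | QI_IOTLB_TYPE;
 *	desc.qw1 = QI_IOTLB_ADDR(addr) | QI_IOTLB_IH(0) |
 *		   QI_IOTLB_AM(size_order);
 */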
#define QI_CC_FM(fm)		(((u64)fm) << 48)
#define QI_CC_SID(sid)		(((u64)sid) << 32)
#define QI_CC_DID(did)		(((u64)did) << 16)
#define QI_CC_GRAN(gran)	(((u64)gran) >> (DMA_CCMD_INVL_GRANU_OFFSET-4))

#define QI_DEV_IOTLB_SID(sid)	((u64)((sid) & 0xffff) << 32)
#define QI_DEV_IOTLB_QDEP(qdep)	(((qdep) & 0x1f) << 16)
#define QI_DEV_IOTLB_ADDR(addr)	((u64)(addr) & VTD_PAGE_MASK)
#define QI_DEV_IOTLB_PFSID(pfsid) (((u64)(pfsid & 0xf) << 12) | \
				   ((u64)((pfsid >> 4) & 0xfff) << 52))
#define QI_DEV_IOTLB_SIZE	1
#define QI_DEV_IOTLB_MAX_INVS	32

#define QI_PC_PASID(pasid)	(((u64)pasid) << 32)
#define QI_PC_DID(did)		(((u64)did) << 16)
#define QI_PC_GRAN(gran)	(((u64)gran) << 4)

/* PASID cache invalidation granu */
#define QI_PC_ALL_PASIDS	0
#define QI_PC_PASID_SEL		1
#define QI_PC_GLOBAL		3

#define QI_EIOTLB_ADDR(addr)	((u64)(addr) & VTD_PAGE_MASK)
#define QI_EIOTLB_IH(ih)	(((u64)ih) << 6)
#define QI_EIOTLB_AM(am)	(((u64)am) & 0x3f)
#define QI_EIOTLB_PASID(pasid)	(((u64)pasid) << 32)
#define QI_EIOTLB_DID(did)	(((u64)did) << 16)
#define QI_EIOTLB_GRAN(gran)	(((u64)gran) << 4)

/* QI Dev-IOTLB inv granu */
#define QI_DEV_IOTLB_GRAN_ALL		1
#define QI_DEV_IOTLB_GRAN_PASID_SEL	0

#define QI_DEV_EIOTLB_ADDR(a)	((u64)(a) & VTD_PAGE_MASK)
#define QI_DEV_EIOTLB_SIZE	(((u64)1) << 11)
#define QI_DEV_EIOTLB_PASID(p)	((u64)((p) & 0xfffff) << 32)
#define QI_DEV_EIOTLB_SID(sid)	((u64)((sid) & 0xffff) << 16)
#define QI_DEV_EIOTLB_QDEP(qd)	((u64)((qd) & 0x1f) << 4)
#define QI_DEV_EIOTLB_PFSID(pfsid) (((u64)(pfsid & 0xf) << 12) | \
				    ((u64)((pfsid >> 4) & 0xfff) << 52))
#define QI_DEV_EIOTLB_MAX_INVS	32

/* Page group response descriptor QW0 */
#define QI_PGRP_PASID_P(p)	(((u64)(p)) << 4)
#define QI_PGRP_PDP(p)		(((u64)(p)) << 5)
#define QI_PGRP_RESP_CODE(res)	(((u64)(res)) << 12)
#define QI_PGRP_DID(rid)	(((u64)(rid)) << 16)
#define QI_PGRP_PASID(pasid)	(((u64)(pasid)) << 32)

/* Page group response descriptor QW1 */
#define QI_PGRP_LPIG(x)		(((u64)(x)) << 2)
#define QI_PGRP_IDX(idx)	(((u64)(idx)) << 3)

#define QI_RESP_SUCCESS		0x0
#define QI_RESP_INVALID		0x1
#define QI_RESP_FAILURE		0xf

#define QI_GRAN_NONG_PASID	2
#define QI_GRAN_PSI_PASID	3

#define qi_shift(iommu)		(DMAR_IQ_SHIFT + !!ecap_smts((iommu)->ecap))
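
/*
 * Illustrative note: a legacy-mode invalidation queue uses 128-bit
 * (16-byte) descriptors and a scalable-mode queue 256-bit (32-byte)
 * ones, so:
 *
 *	entry_size = 1 << qi_shift(iommu);	16 or 32 bytes
 *	offset = index << qi_shift(iommu);	byte offset into qi->desc
 */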
struct qi_desc {
	u64 qw0;
	u64 qw1;
	u64 qw2;
	u64 qw3;
};

struct q_inval {
	raw_spinlock_t  q_lock;
	void		*desc;          /* invalidation queue */
	int             *desc_status;   /* desc status */
	int             free_head;      /* first free entry */
	int             free_tail;      /* last free entry */
	int             free_cnt;
};

/* Page Request Queue depth */
#define PRQ_ORDER	4
#define PRQ_RING_MASK	((0x1000 << PRQ_ORDER) - 0x20)
#define PRQ_DEPTH	((0x1000 << PRQ_ORDER) >> 5)
struct dmar_pci_notify_info;

#ifdef CONFIG_IRQ_REMAP
/* 1MB - maximum possible interrupt remapping table size */
#define INTR_REMAP_PAGE_ORDER	8
#define INTR_REMAP_TABLE_REG_SIZE	0xf
#define INTR_REMAP_TABLE_REG_SIZE_MASK	0xf

#define INTR_REMAP_TABLE_ENTRIES	65536

struct irq_domain;

struct ir_table {
	struct irte *base;
	unsigned long *bitmap;
};

void intel_irq_remap_add_device(struct dmar_pci_notify_info *info);
#else
static inline void
intel_irq_remap_add_device(struct dmar_pci_notify_info *info) { }
#endif

struct iommu_flush {
	void (*flush_context)(struct intel_iommu *iommu, u16 did, u16 sid,
			      u8 fm, u64 type);
	void (*flush_iotlb)(struct intel_iommu *iommu, u16 did, u64 addr,
			    unsigned int size_order, u64 type);
};
#define VTD_FLAG_TRANS_PRE_ENABLED	(1 << 0)
#define VTD_FLAG_IRQ_REMAP_PRE_ENABLED	(1 << 1)
#define VTD_FLAG_SVM_CAPABLE		(1 << 2)

#define sm_supported(iommu)	(intel_iommu_sm && ecap_smts((iommu)->ecap))
#define pasid_supported(iommu)	(sm_supported(iommu) &&			\
				 ecap_pasid((iommu)->ecap))
#define ssads_supported(iommu)	(sm_supported(iommu) &&			\
				 ecap_slads((iommu)->ecap))
#define nested_supported(iommu)	(sm_supported(iommu) &&			\
				 ecap_nest((iommu)->ecap))
struct pasid_entry;
struct pasid_state_entry;
struct page_req_dsc;

/*
 * 0: Present
 * 1-11: Reserved
 * 12-63: Context Ptr (12 - (haw-1))
 * 64-127: Reserved
 */
struct root_entry {
	u64     lo;
	u64     hi;
};

/*
 * low 64 bits:
 * 0: present
 * 1: fault processing disable
 * 2-3: translation type
 * 12-63: address space root
 * high 64 bits:
 * 0-2: address width
 * 3-6: aval
 * 8-23: domain id
 */
struct context_entry {
	u64 lo;
	u64 hi;
};

struct iommu_domain_info {
	struct intel_iommu *iommu;
	unsigned int refcnt;		/* Refcount of devices per iommu */
	u16 did;			/* Domain ids per IOMMU. Use u16 since
					 * domain ids are 16 bit wide according
					 * to VT-d spec, section 9.3 */
};
struct dmar_domain {
	int	nid;			/* node id */
	struct xarray iommu_array;	/* Attached IOMMU array */

	u8 has_iotlb_device: 1;
	u8 iommu_coherency: 1;		/* indicate coherency of iommu access */
	u8 force_snooping : 1;		/* Create IOPTEs with snoop control */
	u8 set_pte_snp:1;
	u8 use_first_level:1;		/* DMA translation for the domain goes
					 * through the first level page table,
					 * otherwise, goes through the second
					 * level.
					 */
	u8 dirty_tracking:1;		/* Dirty tracking is enabled */
	u8 nested_parent:1;		/* Has other domains nested on it */
	u8 has_mappings:1;		/* Has mappings configured through
					 * iommu_map() interface.
					 */

	spinlock_t lock;		/* Protect device tracking lists */
	struct list_head devices;	/* all devices' list */
	struct list_head dev_pasids;	/* all attached pasids */

	int		iommu_superpage;/* Level of superpages supported:
					   0 == 4KiB (no superpages), 1 == 2MiB,
					   2 == 1GiB, 3 == 512GiB, 4 == 1TiB */
	union {
		/* DMA remapping domain */
		struct {
			/* virtual address */
			struct dma_pte	*pgd;
			/* max guest address width */
			int		gaw;
			/*
			 * adjusted guest address width:
			 *   0: level 2 30-bit
			 *   1: level 3 39-bit
			 *   2: level 4 48-bit
			 *   3: level 5 57-bit
			 */
			int		agaw;
			/* maximum mapped address */
			u64		max_addr;
		};

		/* Nested user domain */
		struct {
			/* parent page table which the user domain is nested on */
			struct dmar_domain *s2_domain;
			/* user page table pointer (in GPA) */
			unsigned long s1_pgtbl;
			/* page table attributes */
			struct iommu_hwpt_vtd_s1 s1_cfg;
		};
	};

	struct iommu_domain domain;	/* generic domain data structure for
					   iommu core */
};
/*
 * In theory, the VT-d 4.0 spec can support up to 2 ^ 16 counters.
 * But in practice, there are only 14 counters for the existing
 * platform. Setting the max number of counters to 64 should be good
 * enough for a long time. Also, supporting more than 64 counters
 * requires more extras, e.g., extra freeze and overflow registers,
 * which is not necessary for now.
 */
#define IOMMU_PMU_IDX_MAX		64
struct iommu_pmu {
	struct intel_iommu	*iommu;
	u32			num_cntr;	/* Number of counters */
	u32			num_eg;		/* Number of event group */
	u32			cntr_width;	/* Counter width */
	u32			cntr_stride;	/* Counter Stride */
	u32			filter;		/* Bitmask of filter support */
	void __iomem		*base;		/* the PerfMon base address */
	void __iomem		*cfg_reg;	/* counter configuration base address */
	void __iomem		*cntr_reg;	/* counter 0 address */
	void __iomem		*overflow;	/* overflow status register */

	u64			*evcap;		/* Indicates all supported events */
	u32			**cntr_evcap;	/* Supported events of each counter. */

	struct pmu		pmu;
	DECLARE_BITMAP(used_mask, IOMMU_PMU_IDX_MAX);
	struct perf_event	*event_list[IOMMU_PMU_IDX_MAX];
	unsigned char		irq_name[16];
	struct hlist_node	cpuhp_node;
	int			cpu;
};

#define IOMMU_IRQ_ID_OFFSET_PRQ		(DMAR_UNITS_SUPPORTED)
#define IOMMU_IRQ_ID_OFFSET_PERF	(2 * DMAR_UNITS_SUPPORTED)
struct intel_iommu {
	void __iomem	*reg; /* Pointer to hardware regs, virtual addr */
	u64		reg_phys; /* physical address of hw register set */
	u64		reg_size; /* size of hw register set */
	u64		cap;
	u64		ecap;
	u64		ecmdcap[DMA_MAX_NUM_ECMDCAP];
	u32		gcmd; /* Holds TE, EAFL. Don't need SRTP, SFL, WBF */
	raw_spinlock_t	register_lock; /* protect register handling */
	int		seq_id;	/* sequence id of the iommu */
	int		agaw; /* agaw of this iommu */
	int		msagaw; /* max sagaw of this iommu */
	unsigned int	irq, pr_irq, perf_irq;
	u16		segment;     /* PCI segment# */
	unsigned char	name[13];    /* Device Name */

#ifdef CONFIG_INTEL_IOMMU
	unsigned long	*domain_ids; /* bitmap of domains */
	unsigned long	*copied_tables; /* bitmap of copied tables */
	spinlock_t	lock; /* protect context, domain ids */
	struct root_entry *root_entry; /* virtual address */

	struct iommu_flush flush;
#endif
#ifdef CONFIG_INTEL_IOMMU_SVM
	struct page_req_dsc *prq;
	unsigned char prq_name[16];    /* Name for PRQ interrupt */
	unsigned long prq_seq_number;
	struct completion prq_complete;
#endif
	struct iopf_queue *iopf_queue;
	unsigned char iopfq_name[16];
	struct q_inval  *qi;            /* Queued invalidation info */
	u32 iommu_state[MAX_SR_DMAR_REGS]; /* Store iommu states between suspend and resume.*/

#ifdef CONFIG_IRQ_REMAP
	struct ir_table *ir_table;	/* Interrupt remapping info */
	struct irq_domain *ir_domain;
#endif
	struct iommu_device iommu;  /* IOMMU core code handle */
	int		node;
	u32		flags;      /* Software defined flags */

	struct dmar_drhd_unit *drhd;
	void *perf_statistic;

	struct iommu_pmu *pmu;
};
/* PCI domain-device relationship */
struct device_domain_info {
	struct list_head link;	/* link to domain siblings */
	u32 segment;		/* PCI segment number */
	u8 bus;			/* PCI bus number */
	u8 devfn;		/* PCI devfn number */
	u16 pfsid;		/* SRIOV physical function source ID */
	u8 pasid_supported:3;
	u8 pasid_enabled:1;
	u8 pri_supported:1;
	u8 pri_enabled:1;
	u8 ats_supported:1;
	u8 ats_enabled:1;
	u8 dtlb_extra_inval:1;	/* Quirk for devices needing an extra flush */
	u8 ats_qdep;
	struct device *dev; /* it's NULL for PCIe-to-PCI bridge */
	struct intel_iommu *iommu; /* IOMMU used by this device */
	struct dmar_domain *domain; /* pointer to domain */
	struct pasid_table *pasid_table; /* pasid table */
#ifdef CONFIG_INTEL_IOMMU_DEBUGFS
	struct dentry *debugfs_dentry; /* pointer to device directory dentry */
#endif
};

struct dev_pasid_info {
	struct list_head link_domain;	/* link to domain siblings */
	struct device *dev;
	ioasid_t pasid;
#ifdef CONFIG_INTEL_IOMMU_DEBUGFS
	struct dentry *debugfs_dentry; /* pointer to pasid directory dentry */
#endif
};
static inline void __iommu_flush_cache(
	struct intel_iommu *iommu, void *addr, int size)
{
	if (!ecap_coherent(iommu->ecap))
		clflush_cache_range(addr, size);
}

/* Convert generic struct iommu_domain to private struct dmar_domain */
static inline struct dmar_domain *to_dmar_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct dmar_domain, domain);
}

/* Retrieve the domain ID which has been allocated to the domain */
static inline u16
domain_id_iommu(struct dmar_domain *domain, struct intel_iommu *iommu)
{
	struct iommu_domain_info *info =
		xa_load(&domain->iommu_array, iommu->seq_id);

	return info->did;
}
/*
 * 0: readable
 * 1: writable
 * 2-6: reserved
 * 7: super page
 * 8-11: available
 * 12-63: Host physical address
 */
struct dma_pte {
	u64 val;
};

static inline void dma_clear_pte(struct dma_pte *pte)
{
	pte->val = 0;
}

static inline u64 dma_pte_addr(struct dma_pte *pte)
{
#ifdef CONFIG_64BIT
	return pte->val & VTD_PAGE_MASK & (~DMA_FL_PTE_XD);
#else
	/* Must have a full atomic 64-bit read */
	return __cmpxchg64(&pte->val, 0ULL, 0ULL) &
			VTD_PAGE_MASK & (~DMA_FL_PTE_XD);
#endif
}

static inline bool dma_pte_present(struct dma_pte *pte)
{
	return (pte->val & 3) != 0;
}

static inline bool dma_sl_pte_test_and_clear_dirty(struct dma_pte *pte,
						   unsigned long flags)
{
	if (flags & IOMMU_DIRTY_NO_CLEAR)
		return (pte->val & DMA_SL_PTE_DIRTY) != 0;

	return test_and_clear_bit(DMA_SL_PTE_DIRTY_BIT,
				  (unsigned long *)&pte->val);
}

static inline bool dma_pte_superpage(struct dma_pte *pte)
{
	return (pte->val & DMA_PTE_LARGE_PAGE);
}

static inline bool first_pte_in_page(struct dma_pte *pte)
{
	return IS_ALIGNED((unsigned long)pte, VTD_PAGE_SIZE);
}

static inline int nr_pte_to_next_page(struct dma_pte *pte)
{
	return first_pte_in_page(pte) ? BIT_ULL(VTD_STRIDE_SHIFT) :
		(struct dma_pte *)ALIGN((unsigned long)pte, VTD_PAGE_SIZE) - pte;
}

static inline bool context_present(struct context_entry *context)
{
	return (context->lo & 1);
}
#define LEVEL_STRIDE		(9)
#define LEVEL_MASK		(((u64)1 << LEVEL_STRIDE) - 1)
#define MAX_AGAW_WIDTH		(64)
#define MAX_AGAW_PFN_WIDTH	(MAX_AGAW_WIDTH - VTD_PAGE_SHIFT)

static inline int agaw_to_level(int agaw)
{
	return agaw + 2;
}

static inline int agaw_to_width(int agaw)
{
	return min_t(int, 30 + agaw * LEVEL_STRIDE, MAX_AGAW_WIDTH);
}

static inline int width_to_agaw(int width)
{
	return DIV_ROUND_UP(width - 30, LEVEL_STRIDE);
}

static inline unsigned int level_to_offset_bits(int level)
{
	return (level - 1) * LEVEL_STRIDE;
}

static inline int pfn_level_offset(u64 pfn, int level)
{
	return (pfn >> level_to_offset_bits(level)) & LEVEL_MASK;
}

static inline u64 level_mask(int level)
{
	return -1ULL << level_to_offset_bits(level);
}

static inline u64 level_size(int level)
{
	return 1ULL << level_to_offset_bits(level);
}

static inline u64 align_to_level(u64 pfn, int level)
{
	return (pfn + level_size(level) - 1) & level_mask(level);
}

static inline unsigned long lvl_to_nr_pages(unsigned int lvl)
{
	return 1UL << min_t(int, (lvl - 1) * LEVEL_STRIDE, MAX_AGAW_PFN_WIDTH);
}
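
/*
 * Worked example of the level arithmetic above (illustrative only), for
 * VTD pfn 0x12345 with a 9-bit stride per level:
 *
 *	pfn_level_offset(0x12345, 1) == 0x12345 & 0x1ff	       == 0x145
 *	pfn_level_offset(0x12345, 2) == (0x12345 >> 9) & 0x1ff == 0x091
 *	level_size(2) == 512 pages, i.e. 2MiB of IOVA per level-2 entry
 */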
/* VT-d pages must always be _smaller_ than MM pages. Otherwise things
   are never going to work. */
static inline unsigned long mm_to_dma_pfn_start(unsigned long mm_pfn)
{
	return mm_pfn << (PAGE_SHIFT - VTD_PAGE_SHIFT);
}

static inline unsigned long mm_to_dma_pfn_end(unsigned long mm_pfn)
{
	return ((mm_pfn + 1) << (PAGE_SHIFT - VTD_PAGE_SHIFT)) - 1;
}

static inline unsigned long page_to_dma_pfn(struct page *pg)
{
	return mm_to_dma_pfn_start(page_to_pfn(pg));
}

static inline unsigned long virt_to_dma_pfn(void *p)
{
	return page_to_dma_pfn(virt_to_page(p));
}
static inline void context_set_present(struct context_entry *context)
{
	context->lo |= 1;
}

static inline void context_set_fault_enable(struct context_entry *context)
{
	context->lo &= (((u64)-1) << 2) | 1;
}

static inline void context_set_translation_type(struct context_entry *context,
						int value)
{
	context->lo &= (((u64)-1) << 4) | 3;
	context->lo |= (value & 3) << 2;
}

static inline void context_set_address_root(struct context_entry *context,
					    unsigned long value)
{
	context->lo &= ~VTD_PAGE_MASK;
	context->lo |= value & VTD_PAGE_MASK;
}

static inline void context_set_address_width(struct context_entry *context,
					     unsigned long value)
{
	context->hi |= value & 7;
}

static inline void context_set_domain_id(struct context_entry *context,
					 unsigned long value)
{
	context->hi |= (value & ((1 << 16) - 1)) << 8;
}

static inline void context_set_pasid(struct context_entry *context)
{
	context->lo |= CONTEXT_PASIDE;
}

static inline int context_domain_id(struct context_entry *c)
{
	return (c->hi >> 8) & 0xffff;
}

static inline void context_clear_entry(struct context_entry *context)
{
	context->lo = 0;
	context->hi = 0;
}
#ifdef CONFIG_INTEL_IOMMU
static inline bool context_copied(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
	if (!iommu->copied_tables)
		return false;

	return test_bit(((long)bus << 8) | devfn, iommu->copied_tables);
}

static inline void
set_context_copied(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
	set_bit(((long)bus << 8) | devfn, iommu->copied_tables);
}

static inline void
clear_context_copied(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
	clear_bit(((long)bus << 8) | devfn, iommu->copied_tables);
}
#endif /* CONFIG_INTEL_IOMMU */

/*
 * Set the RID_PASID field of a scalable mode context entry. The
 * IOMMU hardware will use the PASID value set in this field for
 * DMA translations of DMA requests without PASID.
 */
static inline void
context_set_sm_rid2pasid(struct context_entry *context, unsigned long pasid)
{
	context->hi |= pasid & ((1 << 20) - 1);
}

/*
 * Set the DTE(Device-TLB Enable) field of a scalable mode context
 * entry.
 */
static inline void context_set_sm_dte(struct context_entry *context)
{
	context->lo |= BIT_ULL(2);
}

/*
 * Set the PRE(Page Request Enable) field of a scalable mode context
 * entry.
 */
static inline void context_set_sm_pre(struct context_entry *context)
{
	context->lo |= BIT_ULL(4);
}

/* Convert value to context PASID directory size field coding. */
#define context_pdts(pds)	(((pds) & 0x7) << 9)
struct dmar_drhd_unit *dmar_find_matched_drhd_unit(struct pci_dev *dev);

int dmar_enable_qi(struct intel_iommu *iommu);
void dmar_disable_qi(struct intel_iommu *iommu);
int dmar_reenable_qi(struct intel_iommu *iommu);
void qi_global_iec(struct intel_iommu *iommu);

void qi_flush_context(struct intel_iommu *iommu, u16 did,
		      u16 sid, u8 fm, u64 type);
void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
		    unsigned int size_order, u64 type);
void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 pfsid,
			u16 qdep, u64 addr, unsigned mask);

void qi_flush_piotlb(struct intel_iommu *iommu, u16 did, u32 pasid, u64 addr,
		     unsigned long npages, bool ih);

void qi_flush_dev_iotlb_pasid(struct intel_iommu *iommu, u16 sid, u16 pfsid,
			      u32 pasid, u16 qdep, u64 addr,
			      unsigned int size_order);
void quirk_extra_dev_tlb_flush(struct device_domain_info *info,
			       unsigned long address, unsigned long pages,
			       u32 pasid, u16 qdep);
void qi_flush_pasid_cache(struct intel_iommu *iommu, u16 did, u64 granu,
			  u32 pasid);

int qi_submit_sync(struct intel_iommu *iommu, struct qi_desc *desc,
		   unsigned int count, unsigned long options);
/*
 * Options used in qi_submit_sync:
 * QI_OPT_WAIT_DRAIN - Wait for PRQ drain completion, spec 6.5.2.8.
 */
#define QI_OPT_WAIT_DRAIN		BIT(0)
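
/*
 * Illustrative sketch: submitting a single global context-cache
 * invalidation descriptor, roughly equivalent to what qi_flush_context()
 * builds for the global case. Assumes a live `iommu`.
 *
 *	struct qi_desc desc = {};
 *
 *	desc.qw0 = QI_CC_GRAN(DMA_CCMD_GLOBAL_INVL) | QI_CC_TYPE;
 *	qi_submit_sync(iommu, &desc, 1, 0);
 */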
int domain_attach_iommu(struct dmar_domain *domain, struct intel_iommu *iommu);
void domain_detach_iommu(struct dmar_domain *domain, struct intel_iommu *iommu);
void device_block_translation(struct device *dev);
int prepare_domain_attach_device(struct iommu_domain *domain,
				 struct device *dev);
void domain_update_iommu_cap(struct dmar_domain *domain);

int dmar_ir_support(void);

void *alloc_pgtable_page(int node, gfp_t gfp);
void free_pgtable_page(void *vaddr);
void iommu_flush_write_buffer(struct intel_iommu *iommu);
struct iommu_domain *intel_nested_domain_alloc(struct iommu_domain *parent,
					       const struct iommu_user_data *user_data);

#ifdef CONFIG_INTEL_IOMMU_SVM
void intel_svm_check(struct intel_iommu *iommu);
int intel_svm_enable_prq(struct intel_iommu *iommu);
int intel_svm_finish_prq(struct intel_iommu *iommu);
int intel_svm_page_response(struct device *dev, struct iommu_fault_event *evt,
			    struct iommu_page_response *msg);
struct iommu_domain *intel_svm_domain_alloc(void);
void intel_svm_remove_dev_pasid(struct device *dev, ioasid_t pasid);
void intel_drain_pasid_prq(struct device *dev, u32 pasid);

struct intel_svm_dev {
	struct list_head list;
	struct rcu_head rcu;
	struct device *dev;
	struct intel_iommu *iommu;
	u16 did;
	u16 sid, qdep;
};

struct intel_svm {
	struct mmu_notifier notifier;
	struct mm_struct *mm;
	u32 pasid;
	struct list_head devs;
};
#else
static inline void intel_svm_check(struct intel_iommu *iommu) {}
static inline void intel_drain_pasid_prq(struct device *dev, u32 pasid) {}
static inline struct iommu_domain *intel_svm_domain_alloc(void)
{
	return NULL;
}

static inline void intel_svm_remove_dev_pasid(struct device *dev, ioasid_t pasid)
{
}
#endif /* CONFIG_INTEL_IOMMU_SVM */
#ifdef CONFIG_INTEL_IOMMU_DEBUGFS
void intel_iommu_debugfs_init(void);
void intel_iommu_debugfs_create_dev(struct device_domain_info *info);
void intel_iommu_debugfs_remove_dev(struct device_domain_info *info);
void intel_iommu_debugfs_create_dev_pasid(struct dev_pasid_info *dev_pasid);
void intel_iommu_debugfs_remove_dev_pasid(struct dev_pasid_info *dev_pasid);
#else
static inline void intel_iommu_debugfs_init(void) {}
static inline void intel_iommu_debugfs_create_dev(struct device_domain_info *info) {}
static inline void intel_iommu_debugfs_remove_dev(struct device_domain_info *info) {}
static inline void intel_iommu_debugfs_create_dev_pasid(struct dev_pasid_info *dev_pasid) {}
static inline void intel_iommu_debugfs_remove_dev_pasid(struct dev_pasid_info *dev_pasid) {}
#endif /* CONFIG_INTEL_IOMMU_DEBUGFS */
extern const struct attribute_group *intel_iommu_groups[];
struct context_entry *iommu_context_addr(struct intel_iommu *iommu, u8 bus,
					 u8 devfn, int alloc);

extern const struct iommu_ops intel_iommu_ops;

#ifdef CONFIG_INTEL_IOMMU
extern int intel_iommu_sm;
int iommu_calculate_agaw(struct intel_iommu *iommu);
int iommu_calculate_max_sagaw(struct intel_iommu *iommu);
int ecmd_submit_sync(struct intel_iommu *iommu, u8 ecmd, u64 oa, u64 ob);

static inline bool ecmd_has_pmu_essential(struct intel_iommu *iommu)
{
	return (iommu->ecmdcap[DMA_ECMD_ECCAP3] & DMA_ECMD_ECCAP3_ESSENTIAL) ==
	       DMA_ECMD_ECCAP3_ESSENTIAL;
}

extern int dmar_disabled;
extern int intel_iommu_enabled;
#else
static inline int iommu_calculate_agaw(struct intel_iommu *iommu)
{
	return 0;
}

static inline int iommu_calculate_max_sagaw(struct intel_iommu *iommu)
{
	return 0;
}

#define dmar_disabled (1)
#define intel_iommu_enabled (0)
#define intel_iommu_sm (0)
#endif
static inline const char *decode_prq_descriptor(char *str, size_t size,
		u64 dw0, u64 dw1, u64 dw2, u64 dw3)
{
	char *buf = str;
	int bytes;

	bytes = snprintf(buf, size,
			 "rid=0x%llx addr=0x%llx %c%c%c%c%c pasid=0x%llx index=0x%llx",
			 FIELD_GET(GENMASK_ULL(31, 16), dw0),
			 FIELD_GET(GENMASK_ULL(63, 12), dw1),
			 dw1 & BIT_ULL(0) ? 'r' : '-',
			 dw1 & BIT_ULL(1) ? 'w' : '-',
			 dw0 & BIT_ULL(52) ? 'x' : '-',
			 dw0 & BIT_ULL(53) ? 'p' : '-',
			 dw1 & BIT_ULL(2) ? 'l' : '-',
			 FIELD_GET(GENMASK_ULL(51, 32), dw0),
			 FIELD_GET(GENMASK_ULL(11, 3), dw1));

	/* Private Data */
	if (dw0 & BIT_ULL(9)) {
		buf += bytes;
		size -= bytes;
		snprintf(buf, size, " private=0x%llx/0x%llx\n", dw2, dw3);
	}

	return str;
}

#endif /* _INTEL_IOMMU_H_ */