/*
 * QEMU emulation of an Intel IOMMU (VT-d)
 *   (DMA Remapping device)
 *
 * Copyright (C) 2013 Knut Omang, Oracle <knut.omang@oracle.com>
 * Copyright (C) 2014 Le Tan, <tamlokveer@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#ifndef INTEL_IOMMU_H
#define INTEL_IOMMU_H

#include "hw/i386/x86-iommu.h"
#include "qemu/iova-tree.h"
#include "qom/object.h"

#define TYPE_INTEL_IOMMU_DEVICE "intel-iommu"
OBJECT_DECLARE_SIMPLE_TYPE(IntelIOMMUState, INTEL_IOMMU_DEVICE)

#define TYPE_INTEL_IOMMU_MEMORY_REGION "intel-iommu-iommu-memory-region"

/* DMAR Hardware Unit Definition address (IOMMU unit) */
#define Q35_HOST_BRIDGE_IOMMU_ADDR  0xfed90000ULL

#define VTD_PCI_BUS_MAX             256
#define VTD_PCI_SLOT_MAX            32
#define VTD_PCI_FUNC_MAX            8
#define VTD_PCI_SLOT(devfn)         (((devfn) >> 3) & 0x1f)
#define VTD_PCI_FUNC(devfn)         ((devfn) & 0x07)
#define VTD_SID_TO_BUS(sid)         (((sid) >> 8) & 0xff)
#define VTD_SID_TO_DEVFN(sid)       ((sid) & 0xff)
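
/*
 * Illustrative sketch (not part of the original header): how a 16-bit VT-d
 * source-id (SID) decomposes into PCI bus/slot/function using the macros
 * above.  The helper name is hypothetical.
 */
static inline void vtd_sid_decompose_example(uint16_t sid, uint8_t *bus,
                                             uint8_t *slot, uint8_t *func)
{
    uint8_t devfn = VTD_SID_TO_DEVFN(sid);

    *bus = VTD_SID_TO_BUS(sid);     /* bits 15:8 of the SID */
    *slot = VTD_PCI_SLOT(devfn);    /* bits 7:3 of devfn */
    *func = VTD_PCI_FUNC(devfn);    /* bits 2:0 of devfn */
}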

#define DMAR_REG_SIZE               0x230
#define VTD_HOST_AW_39BIT           39
#define VTD_HOST_AW_48BIT           48
#define VTD_HOST_ADDRESS_WIDTH      VTD_HOST_AW_39BIT
#define VTD_HAW_MASK(aw)            ((1ULL << (aw)) - 1)
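
/*
 * Illustrative sketch (not part of the original header): with a guest
 * address width of "aw" bits, an IOVA is in range iff no bit above
 * bit (aw - 1) is set, i.e. iff it is fully covered by VTD_HAW_MASK(aw).
 * The helper name is hypothetical.
 */
static inline bool vtd_iova_in_range_example(uint64_t iova, uint8_t aw)
{
    return !(iova & ~VTD_HAW_MASK(aw));
}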

#define DMAR_REPORT_F_INTR          (1)

#define  VTD_MSI_ADDR_HI_MASK        (0xffffffff00000000ULL)
#define  VTD_MSI_ADDR_HI_SHIFT       (32)
#define  VTD_MSI_ADDR_LO_MASK        (0x00000000ffffffffULL)

typedef struct VTDContextEntry VTDContextEntry;
typedef struct VTDContextCacheEntry VTDContextCacheEntry;
typedef struct VTDAddressSpace VTDAddressSpace;
typedef struct VTDIOTLBEntry VTDIOTLBEntry;
typedef union VTD_IR_TableEntry VTD_IR_TableEntry;
typedef union VTD_IR_MSIAddress VTD_IR_MSIAddress;
typedef struct VTDPASIDDirEntry VTDPASIDDirEntry;
typedef struct VTDPASIDEntry VTDPASIDEntry;

/* Context-Entry */
struct VTDContextEntry {
    union {
        struct {
            uint64_t lo;
            uint64_t hi;
        };
        struct {
            uint64_t val[4];
        };
    };
};

struct VTDContextCacheEntry {
    /* The cache entry is obsolete if
     * context_cache_gen!=IntelIOMMUState.context_cache_gen
     */
    uint32_t context_cache_gen;
    struct VTDContextEntry context_entry;
};
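
/*
 * Illustrative sketch (not part of the original header): a cached context
 * entry is only valid while its generation matches the IOMMU-wide
 * IntelIOMMUState.context_cache_gen, so bumping the global generation
 * lazily invalidates every cached entry at once.  The helper name is
 * hypothetical.
 */
static inline bool vtd_context_cache_entry_valid_example(
    const VTDContextCacheEntry *entry, uint32_t global_gen)
{
    return entry->context_cache_gen == global_gen;
}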

/* PASID Directory Entry */
struct VTDPASIDDirEntry {
    uint64_t val;
};

/* PASID Table Entry */
struct VTDPASIDEntry {
    uint64_t val[8];
};

struct VTDAddressSpace {
    PCIBus *bus;
    uint8_t devfn;
    uint32_t pasid;
    AddressSpace as;
    IOMMUMemoryRegion iommu;
    MemoryRegion root;          /* The root container of the device */
    MemoryRegion nodmar;        /* The alias of shared nodmar MR */
    MemoryRegion iommu_ir;      /* Interrupt region: 0xfeeXXXXX */
    MemoryRegion iommu_ir_fault; /* Interrupt region for catching fault */
    IntelIOMMUState *iommu_state;
    VTDContextCacheEntry context_cache_entry;
    QLIST_ENTRY(VTDAddressSpace) next;
    /* Superset of notifier flags that this address space has */
    IOMMUNotifierFlag notifier_flags;
    /*
     * @iova_tree traces mapped IOVA ranges.
     *
     * The tree is not needed if no MAP notifier is registered with the
     * current VTD address space, because all guest invalidation commands
     * can be passed directly to the IOMMU UNMAP notifiers without any
     * further reshuffling.
     *
     * The tree, on the other hand, is required for MAP-typed IOMMU
     * notifiers for a few reasons.
     *
     * Firstly, there is no way to identify whether a PSI (Page Selective
     * Invalidation) or DSI (Domain Selective Invalidation) event is a MAP
     * or UNMAP event from the message itself.  Without prior knowledge of
     * the existing state, the vIOMMU doesn't know whether it should notify
     * MAP or UNMAP for a PSI message it receives when caching mode is
     * enabled (for MAP notifiers).
     *
     * Secondly, PSI messages received from the guest driver can be
     * enlarged in range, covering but not limited to what the guest driver
     * wanted to invalidate.  When the range to invalidate gets bigger than
     * the limit of a PSI message, it can even become a DSI, which
     * invalidates the whole domain.  If the vIOMMU directly notifies the
     * registered device with the unmodified range, it may confuse the
     * registered drivers (e.g. vfio-pci) by either:
     *
     *   (1) Trying to map the same region more than once (for
     *       VFIO_IOMMU_MAP_DMA, -EEXIST will trigger), or,
     *
     *   (2) Trying to UNMAP a range that is still partially mapped.
     *
     * That accuracy is not required for UNMAP-only notifiers, but it is a
     * must-have for notifiers registered with MAP events, because the
     * vIOMMU needs to make sure the shadow page table is always in sync
     * with the guest IOMMU page tables for a device.  (See the
     * illustrative sketch after this struct for how the tree is consulted.)
     */
    IOVATree *iova_tree;
};
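
/*
 * Illustrative sketch (not part of the original header): when caching mode
 * is enabled and a MAP notifier is registered, a guest PSI can only be
 * classified as MAP or UNMAP by consulting @iova_tree.  The helper below
 * is hypothetical and assumes the IOVATree API from "qemu/iova-tree.h",
 * with DMAMap.size holding the inclusive size (last offset from iova).
 */
static inline bool vtd_psi_is_new_mapping_example(VTDAddressSpace *vtd_as,
                                                  uint64_t iova,
                                                  uint64_t addr_mask)
{
    const DMAMap target = {
        .iova = iova,
        .size = addr_mask,
    };

    /*
     * If nothing overlapping is tracked yet, this PSI describes a new
     * mapping and should be forwarded as a MAP event; otherwise the
     * already-tracked range must first be unmapped (and possibly remapped).
     */
    return iova_tree_find(vtd_as->iova_tree, &target) == NULL;
}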

struct VTDIOTLBEntry {
    uint64_t gfn;
    uint16_t domain_id;
    uint32_t pasid;
    uint64_t slpte;
    uint64_t mask;
    uint8_t access_flags;
};

/* VT-d Source-ID Qualifier types */
enum {
    VTD_SQ_FULL = 0x00,     /* Full SID verification */
    VTD_SQ_IGN_3 = 0x01,    /* Ignore bit 3 */
    VTD_SQ_IGN_2_3 = 0x02,  /* Ignore bits 2 & 3 */
    VTD_SQ_IGN_1_3 = 0x03,  /* Ignore bits 1-3 */
    VTD_SQ_MAX,
};
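
/*
 * Illustrative sketch (not part of the original header): the source-id
 * qualifier selects which low function-number bits are ignored when a
 * requester SID is compared against an IRTE's source_id, per the comments
 * above.  A requester matches when (req_sid & mask) == (irte_sid & mask).
 * The table and helper name are hypothetical.
 */
static inline uint16_t vtd_sq_compare_mask_example(unsigned sq)
{
    static const uint16_t mask[VTD_SQ_MAX] = {
        [VTD_SQ_FULL]    = 0xffff,  /* compare all 16 bits */
        [VTD_SQ_IGN_3]   = 0xfff7,  /* ignore bit 3 */
        [VTD_SQ_IGN_2_3] = 0xfff3,  /* ignore bits 2 and 3 */
        [VTD_SQ_IGN_1_3] = 0xfff1,  /* ignore bits 1-3 */
    };

    return sq < VTD_SQ_MAX ? mask[sq] : 0xffff;
}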

/* VT-d Source Validation Types */
enum {
    VTD_SVT_NONE = 0x00,    /* No validation */
    VTD_SVT_ALL = 0x01,     /* Do full validation */
    VTD_SVT_BUS = 0x02,     /* Validate bus range */
    VTD_SVT_MAX,
};

/* Interrupt Remapping Table Entry Definition */
union VTD_IR_TableEntry {
    struct {
#if HOST_BIG_ENDIAN
        uint64_t dest_id:32;         /* Destination ID */
        uint64_t __reserved_1:8;     /* Reserved 1 */
        uint64_t vector:8;           /* Interrupt Vector */
        uint64_t irte_mode:1;        /* IRTE Mode */
        uint64_t __reserved_0:3;     /* Reserved 0 */
        uint64_t __avail:4;          /* Available spaces for software */
        uint64_t delivery_mode:3;    /* Delivery Mode */
        uint64_t trigger_mode:1;     /* Trigger Mode */
        uint64_t redir_hint:1;       /* Redirection Hint */
        uint64_t dest_mode:1;        /* Destination Mode */
        uint64_t fault_disable:1;    /* Fault Processing Disable */
        uint64_t present:1;          /* Whether entry present/available */
#else
        uint64_t present:1;          /* Whether entry present/available */
        uint64_t fault_disable:1;    /* Fault Processing Disable */
        uint64_t dest_mode:1;        /* Destination Mode */
        uint64_t redir_hint:1;       /* Redirection Hint */
        uint64_t trigger_mode:1;     /* Trigger Mode */
        uint64_t delivery_mode:3;    /* Delivery Mode */
        uint64_t __avail:4;          /* Available spaces for software */
        uint64_t __reserved_0:3;     /* Reserved 0 */
        uint64_t irte_mode:1;        /* IRTE Mode */
        uint64_t vector:8;           /* Interrupt Vector */
        uint64_t __reserved_1:8;     /* Reserved 1 */
        uint64_t dest_id:32;         /* Destination ID */
#endif
#if HOST_BIG_ENDIAN
        uint64_t __reserved_2:44;    /* Reserved 2 */
        uint64_t sid_vtype:2;        /* Source-ID Validation Type */
        uint64_t sid_q:2;            /* Source-ID Qualifier */
        uint64_t source_id:16;       /* Source-ID */
#else
        uint64_t source_id:16;       /* Source-ID */
        uint64_t sid_q:2;            /* Source-ID Qualifier */
        uint64_t sid_vtype:2;        /* Source-ID Validation Type */
        uint64_t __reserved_2:44;    /* Reserved 2 */
#endif
    } QEMU_PACKED irte;
    uint64_t data[2];
};

#define VTD_IR_INT_FORMAT_COMPAT     (0) /* Compatible Interrupt */
#define VTD_IR_INT_FORMAT_REMAP      (1) /* Remappable Interrupt */

/* Programming format for MSI/MSI-X addresses */
union VTD_IR_MSIAddress {
    struct {
#if HOST_BIG_ENDIAN
        uint32_t __head:12;          /* Should always be: 0x0fee */
        uint32_t index_l:15;         /* Interrupt index bit 14-0 */
        uint32_t int_mode:1;         /* Interrupt format */
        uint32_t sub_valid:1;        /* SHV: Sub-Handle Valid bit */
        uint32_t index_h:1;          /* Interrupt index bit 15 */
        uint32_t __not_care:2;
#else
        uint32_t __not_care:2;
        uint32_t index_h:1;          /* Interrupt index bit 15 */
        uint32_t sub_valid:1;        /* SHV: Sub-Handle Valid bit */
        uint32_t int_mode:1;         /* Interrupt format */
        uint32_t index_l:15;         /* Interrupt index bit 14-0 */
        uint32_t __head:12;          /* Should always be: 0x0fee */
#endif
    } QEMU_PACKED addr;
    uint32_t data;
};
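
/*
 * Illustrative sketch (not part of the original header): in remappable
 * format the 16-bit interrupt index is split across the address, so it is
 * reassembled from index_h (bit 15) and index_l (bits 14-0).  Per the VT-d
 * spec, when sub_valid (SHV) is set a sub-handle taken from the MSI data
 * is additionally added to this index.  The helper name is hypothetical.
 */
static inline uint16_t vtd_ir_msi_index_example(const VTD_IR_MSIAddress *addr)
{
    return (addr->addr.index_h << 15) | addr->addr.index_l;
}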

/* When IR is enabled, all MSI/MSI-X data bits should be zero */
#define VTD_IR_MSI_DATA          (0)

/* The iommu (DMAR) device state struct */
struct IntelIOMMUState {
    X86IOMMUState x86_iommu;
    MemoryRegion csrmem;
    MemoryRegion mr_nodmar;
    MemoryRegion mr_ir;
    MemoryRegion mr_sys_alias;
    uint8_t csr[DMAR_REG_SIZE];     /* register values */
    uint8_t wmask[DMAR_REG_SIZE];   /* R/W bytes */
    uint8_t w1cmask[DMAR_REG_SIZE]; /* RW1C(Write 1 to Clear) bytes */
    uint8_t womask[DMAR_REG_SIZE];  /* WO (write only - read returns 0) */
    uint32_t version;

    bool caching_mode;              /* RO - is cap CM enabled? */
    bool scalable_mode;             /* RO - is Scalable Mode supported? */
    bool snoop_control;             /* RO - is SNP field supported? */

    dma_addr_t root;                /* Current root table pointer */
    bool root_scalable;             /* Type of root table (scalable or not) */
    bool dmar_enabled;              /* Set if DMA remapping is enabled */

    uint16_t iq_head;               /* Current invalidation queue head */
    uint16_t iq_tail;               /* Current invalidation queue tail */
    dma_addr_t iq;                  /* Current invalidation queue pointer */
    uint16_t iq_size;               /* IQ Size in number of entries */
    bool iq_dw;                     /* IQ descriptor width: 256-bit or not */
    bool qi_enabled;                /* Set if the QI is enabled */
    uint8_t iq_last_desc_type;      /* The type of last completed descriptor */

    /* The index of the Fault Recording Register to be used next.
     * Wraps around from N-1 to 0, where N is the number of FRCD_REG.
     */
    uint16_t next_frcd_reg;

    uint64_t cap;                   /* The value of capability reg */
    uint64_t ecap;                  /* The value of extended capability reg */

    uint32_t context_cache_gen;     /* Should be in [1,MAX] */
    GHashTable *iotlb;              /* IOTLB */

    GHashTable *vtd_address_spaces;             /* VTD address spaces */
    VTDAddressSpace *vtd_as_cache[VTD_PCI_BUS_MAX]; /* VTD address space cache */
    /* list of registered notifiers */
    QLIST_HEAD(, VTDAddressSpace) vtd_as_with_notifiers;

    /* interrupt remapping */
    bool intr_enabled;              /* Whether guest enabled IR */
    dma_addr_t intr_root;           /* Interrupt remapping table pointer */
    uint32_t intr_size;             /* Number of IR table entries */
    bool intr_eime;                 /* Extended interrupt mode enabled */
    OnOffAuto intr_eim;             /* Toggle for EIM capability */
    bool buggy_eim;                 /* Force buggy EIM unless eim=off */
    uint8_t aw_bits;                /* Host/IOVA address width (in bits) */
    bool dma_drain;                 /* Whether DMA r/w draining enabled */
    bool dma_translation;           /* Whether DMA translation supported */
    bool pasid;                     /* Whether to support PASID */

    /*
     * Protects IOMMU states in general.  Currently it protects the
     * per-IOMMU IOTLB cache, and context entry cache in VTDAddressSpace.
     */
    QemuMutex iommu_lock;
};

/* Find the VTD address space associated with the given bus pointer,
 * devfn and PASID; create a new one if none exists.
 */
VTDAddressSpace *vtd_find_add_as(IntelIOMMUState *s, PCIBus *bus,
                                 int devfn, unsigned int pasid);
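
/*
 * Illustrative usage (not part of the original header), assuming a caller
 * that already holds the IntelIOMMUState, the device's PCIBus and devfn;
 * PCI_NO_PASID (requesting the non-PASID address space) is an assumption
 * about the PCI layer's constant name:
 *
 *     VTDAddressSpace *vtd_as =
 *         vtd_find_add_as(s, bus, devfn, PCI_NO_PASID);
 */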

#endif