author     Peter Maydell <peter.maydell@linaro.org>    2020-08-24 12:13:09 +0100
committer  Peter Maydell <peter.maydell@linaro.org>    2020-08-24 12:13:09 +0100
commit     07d914cb9489f7acbd91ed675355674c8a5545b0 (patch)
tree       97c1e898a78cddac7c3e1e12aa11f2fbb5694cc2 /hw
parent     dd8014e4e904e895435aae9f11a686f072762782 (diff)
parent     b34aa5129e9c3aff890b4f4bcc84962e94185629 (diff)
Merge remote-tracking branch 'remotes/pmaydell/tags/pull-target-arm-20200824' into staging
target-arm queue:
 * hw/cpu/a9mpcore: Verify the machine use Cortex-A9 cores
 * hw/arm/smmuv3: Implement SMMUv3.2 range-invalidation
 * docs/system/arm: Document the Xilinx Versal Virt board
 * target/arm: Make M-profile NOCP take precedence over UNDEF
 * target/arm: Use correct FPST for VCMLA, VCADD on fp16
 * target/arm: Various cleanups preparing for fp16 support

# gpg: Signature made Mon 24 Aug 2020 10:47:14 BST
# gpg:                using RSA key E1A5C593CD419DE28E8315CF3C2525ED14360CDE
# gpg:                issuer "peter.maydell@linaro.org"
# gpg: Good signature from "Peter Maydell <peter.maydell@linaro.org>" [ultimate]
# gpg:                 aka "Peter Maydell <pmaydell@gmail.com>" [ultimate]
# gpg:                 aka "Peter Maydell <pmaydell@chiark.greenend.org.uk>" [ultimate]
# Primary key fingerprint: E1A5 C593 CD41 9DE2 8E83 15CF 3C25 25ED 1436 0CDE

* remotes/pmaydell/tags/pull-target-arm-20200824: (27 commits)
  target/arm: Use correct FPST for VCMLA, VCADD on fp16
  target/arm: Implement FPST_STD_F16 fpstatus
  target/arm: Make A32/T32 use new fpstatus_ptr() API
  target/arm: Replace A64 get_fpstatus_ptr() with generic fpstatus_ptr()
  target/arm: Delete unused ARM_FEATURE_CRC
  target/arm/translate.c: Delete/amend incorrect comments
  target/arm: Delete unused VFP_DREG macros
  target/arm: Remove ARCH macro
  target/arm: Convert T32 coprocessor insns to decodetree
  target/arm: Do M-profile NOCP checks early and via decodetree
  target/arm: Tidy up disas_arm_insn()
  target/arm: Convert A32 coprocessor insns to decodetree
  target/arm: Separate decode from handling of coproc insns
  target/arm: Pull handling of XScale insns out of disas_coproc_insn()
  docs/system/arm: Document the Xilinx Versal Virt board
  hw/arm/smmuv3: Advertise SMMUv3.2 range invalidation
  hw/arm/smmuv3: Support HAD and advertise SMMUv3.1 support
  hw/arm/smmuv3: Let AIDR advertise SMMUv3.0 support
  hw/arm/smmuv3: Fix IIDR offset
  hw/arm/smmuv3: Get prepared for range invalidation
  ...

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Diffstat (limited to 'hw')
-rw-r--r--  hw/arm/smmu-common.c     | 214
-rw-r--r--  hw/arm/smmu-internal.h   |   8
-rw-r--r--  hw/arm/smmuv3-internal.h |  10
-rw-r--r--  hw/arm/smmuv3.c          | 142
-rw-r--r--  hw/arm/trace-events      |  12
-rw-r--r--  hw/cpu/a9mpcore.c        |  12
6 files changed, 251 insertions, 147 deletions
diff --git a/hw/arm/smmu-common.c b/hw/arm/smmu-common.c
index e13a5f4..3838db1 100644
--- a/hw/arm/smmu-common.c
+++ b/hw/arm/smmu-common.c
@@ -32,6 +32,91 @@
/* IOTLB Management */
+static guint smmu_iotlb_key_hash(gconstpointer v)
+{
+ SMMUIOTLBKey *key = (SMMUIOTLBKey *)v;
+ uint32_t a, b, c;
+
+ /* Jenkins hash */
+ a = b = c = JHASH_INITVAL + sizeof(*key);
+ a += key->asid + key->level + key->tg;
+ b += extract64(key->iova, 0, 32);
+ c += extract64(key->iova, 32, 32);
+
+ __jhash_mix(a, b, c);
+ __jhash_final(a, b, c);
+
+ return c;
+}
+
+static gboolean smmu_iotlb_key_equal(gconstpointer v1, gconstpointer v2)
+{
+ SMMUIOTLBKey *k1 = (SMMUIOTLBKey *)v1, *k2 = (SMMUIOTLBKey *)v2;
+
+ return (k1->asid == k2->asid) && (k1->iova == k2->iova) &&
+ (k1->level == k2->level) && (k1->tg == k2->tg);
+}
+
+SMMUIOTLBKey smmu_get_iotlb_key(uint16_t asid, uint64_t iova,
+ uint8_t tg, uint8_t level)
+{
+ SMMUIOTLBKey key = {.asid = asid, .iova = iova, .tg = tg, .level = level};
+
+ return key;
+}
+
+SMMUTLBEntry *smmu_iotlb_lookup(SMMUState *bs, SMMUTransCfg *cfg,
+ SMMUTransTableInfo *tt, hwaddr iova)
+{
+ uint8_t tg = (tt->granule_sz - 10) / 2;
+ uint8_t inputsize = 64 - tt->tsz;
+ uint8_t stride = tt->granule_sz - 3;
+ uint8_t level = 4 - (inputsize - 4) / stride;
+ SMMUTLBEntry *entry = NULL;
+
+ while (level <= 3) {
+ uint64_t subpage_size = 1ULL << level_shift(level, tt->granule_sz);
+ uint64_t mask = subpage_size - 1;
+ SMMUIOTLBKey key;
+
+ key = smmu_get_iotlb_key(cfg->asid, iova & ~mask, tg, level);
+ entry = g_hash_table_lookup(bs->iotlb, &key);
+ if (entry) {
+ break;
+ }
+ level++;
+ }
+
+ if (entry) {
+ cfg->iotlb_hits++;
+ trace_smmu_iotlb_lookup_hit(cfg->asid, iova,
+ cfg->iotlb_hits, cfg->iotlb_misses,
+ 100 * cfg->iotlb_hits /
+ (cfg->iotlb_hits + cfg->iotlb_misses));
+ } else {
+ cfg->iotlb_misses++;
+ trace_smmu_iotlb_lookup_miss(cfg->asid, iova,
+ cfg->iotlb_hits, cfg->iotlb_misses,
+ 100 * cfg->iotlb_hits /
+ (cfg->iotlb_hits + cfg->iotlb_misses));
+ }
+ return entry;
+}
+
+void smmu_iotlb_insert(SMMUState *bs, SMMUTransCfg *cfg, SMMUTLBEntry *new)
+{
+ SMMUIOTLBKey *key = g_new0(SMMUIOTLBKey, 1);
+ uint8_t tg = (new->granule - 10) / 2;
+
+ if (g_hash_table_size(bs->iotlb) >= SMMU_IOTLB_MAX_SIZE) {
+ smmu_iotlb_inv_all(bs);
+ }
+
+ *key = smmu_get_iotlb_key(cfg->asid, new->entry.iova, tg, new->level);
+ trace_smmu_iotlb_insert(cfg->asid, new->entry.iova, tg, new->level);
+ g_hash_table_insert(bs->iotlb, key, new);
+}
+
inline void smmu_iotlb_inv_all(SMMUState *s)
{
trace_smmu_iotlb_inv_all();
@@ -44,15 +129,44 @@ static gboolean smmu_hash_remove_by_asid(gpointer key, gpointer value,
uint16_t asid = *(uint16_t *)user_data;
SMMUIOTLBKey *iotlb_key = (SMMUIOTLBKey *)key;
- return iotlb_key->asid == asid;
+ return SMMU_IOTLB_ASID(*iotlb_key) == asid;
}
-inline void smmu_iotlb_inv_iova(SMMUState *s, uint16_t asid, dma_addr_t iova)
+static gboolean smmu_hash_remove_by_asid_iova(gpointer key, gpointer value,
+ gpointer user_data)
{
- SMMUIOTLBKey key = {.asid = asid, .iova = iova};
+ SMMUTLBEntry *iter = (SMMUTLBEntry *)value;
+ IOMMUTLBEntry *entry = &iter->entry;
+ SMMUIOTLBPageInvInfo *info = (SMMUIOTLBPageInvInfo *)user_data;
+ SMMUIOTLBKey iotlb_key = *(SMMUIOTLBKey *)key;
- trace_smmu_iotlb_inv_iova(asid, iova);
- g_hash_table_remove(s->iotlb, &key);
+ if (info->asid >= 0 && info->asid != SMMU_IOTLB_ASID(iotlb_key)) {
+ return false;
+ }
+ return ((info->iova & ~entry->addr_mask) == entry->iova) ||
+ ((entry->iova & ~info->mask) == info->iova);
+}
+
+inline void
+smmu_iotlb_inv_iova(SMMUState *s, int asid, dma_addr_t iova,
+ uint8_t tg, uint64_t num_pages, uint8_t ttl)
+{
+ if (ttl && (num_pages == 1)) {
+ SMMUIOTLBKey key = smmu_get_iotlb_key(asid, iova, tg, ttl);
+
+ g_hash_table_remove(s->iotlb, &key);
+ } else {
+ /* if tg is not set we use 4KB range invalidation */
+ uint8_t granule = tg ? tg * 2 + 10 : 12;
+
+ SMMUIOTLBPageInvInfo info = {
+ .asid = asid, .iova = iova,
+ .mask = (num_pages * 1 << granule) - 1};
+
+ g_hash_table_foreach_remove(s->iotlb,
+ smmu_hash_remove_by_asid_iova,
+ &info);
+ }
}
inline void smmu_iotlb_inv_asid(SMMUState *s, uint16_t asid)
@@ -149,7 +263,7 @@ SMMUTransTableInfo *select_tt(SMMUTransCfg *cfg, dma_addr_t iova)
* @cfg: translation config
* @iova: iova to translate
* @perm: access type
- * @tlbe: IOMMUTLBEntry (out)
+ * @tlbe: SMMUTLBEntry (out)
* @info: handle to an error info
*
* Return 0 on success, < 0 on error. In case of error, @info is filled
@@ -159,7 +273,7 @@ SMMUTransTableInfo *select_tt(SMMUTransCfg *cfg, dma_addr_t iova)
*/
static int smmu_ptw_64(SMMUTransCfg *cfg,
dma_addr_t iova, IOMMUAccessFlags perm,
- IOMMUTLBEntry *tlbe, SMMUPTWEventInfo *info)
+ SMMUTLBEntry *tlbe, SMMUPTWEventInfo *info)
{
dma_addr_t baseaddr, indexmask;
int stage = cfg->stage;
@@ -179,14 +293,11 @@ static int smmu_ptw_64(SMMUTransCfg *cfg,
baseaddr = extract64(tt->ttb, 0, 48);
baseaddr &= ~indexmask;
- tlbe->iova = iova;
- tlbe->addr_mask = (1 << granule_sz) - 1;
-
while (level <= 3) {
uint64_t subpage_size = 1ULL << level_shift(level, granule_sz);
uint64_t mask = subpage_size - 1;
uint32_t offset = iova_level_offset(iova, inputsize, level, granule_sz);
- uint64_t pte;
+ uint64_t pte, gpa;
dma_addr_t pte_addr = baseaddr + offset * sizeof(pte);
uint8_t ap;
@@ -199,60 +310,50 @@ static int smmu_ptw_64(SMMUTransCfg *cfg,
if (is_invalid_pte(pte) || is_reserved_pte(pte, level)) {
trace_smmu_ptw_invalid_pte(stage, level, baseaddr,
pte_addr, offset, pte);
- info->type = SMMU_PTW_ERR_TRANSLATION;
- goto error;
+ break;
}
- if (is_page_pte(pte, level)) {
- uint64_t gpa = get_page_pte_address(pte, granule_sz);
+ if (is_table_pte(pte, level)) {
+ ap = PTE_APTABLE(pte);
- ap = PTE_AP(pte);
- if (is_permission_fault(ap, perm)) {
+ if (is_permission_fault(ap, perm) && !tt->had) {
info->type = SMMU_PTW_ERR_PERMISSION;
goto error;
}
-
- tlbe->translated_addr = gpa + (iova & mask);
- tlbe->perm = PTE_AP_TO_PERM(ap);
+ baseaddr = get_table_pte_address(pte, granule_sz);
+ level++;
+ continue;
+ } else if (is_page_pte(pte, level)) {
+ gpa = get_page_pte_address(pte, granule_sz);
trace_smmu_ptw_page_pte(stage, level, iova,
baseaddr, pte_addr, pte, gpa);
- return 0;
- }
- if (is_block_pte(pte, level)) {
+ } else {
uint64_t block_size;
- hwaddr gpa = get_block_pte_address(pte, level, granule_sz,
- &block_size);
-
- ap = PTE_AP(pte);
- if (is_permission_fault(ap, perm)) {
- info->type = SMMU_PTW_ERR_PERMISSION;
- goto error;
- }
+ gpa = get_block_pte_address(pte, level, granule_sz,
+ &block_size);
trace_smmu_ptw_block_pte(stage, level, baseaddr,
pte_addr, pte, iova, gpa,
block_size >> 20);
-
- tlbe->translated_addr = gpa + (iova & mask);
- tlbe->perm = PTE_AP_TO_PERM(ap);
- return 0;
}
-
- /* table pte */
- ap = PTE_APTABLE(pte);
-
+ ap = PTE_AP(pte);
if (is_permission_fault(ap, perm)) {
info->type = SMMU_PTW_ERR_PERMISSION;
goto error;
}
- baseaddr = get_table_pte_address(pte, granule_sz);
- level++;
- }
+ tlbe->entry.translated_addr = gpa;
+ tlbe->entry.iova = iova & ~mask;
+ tlbe->entry.addr_mask = mask;
+ tlbe->entry.perm = PTE_AP_TO_PERM(ap);
+ tlbe->level = level;
+ tlbe->granule = granule_sz;
+ return 0;
+ }
info->type = SMMU_PTW_ERR_TRANSLATION;
error:
- tlbe->perm = IOMMU_NONE;
+ tlbe->entry.perm = IOMMU_NONE;
return -EINVAL;
}
@@ -268,7 +369,7 @@ error:
* return 0 on success
*/
inline int smmu_ptw(SMMUTransCfg *cfg, dma_addr_t iova, IOMMUAccessFlags perm,
- IOMMUTLBEntry *tlbe, SMMUPTWEventInfo *info)
+ SMMUTLBEntry *tlbe, SMMUPTWEventInfo *info)
{
if (!cfg->aa64) {
/*
@@ -361,31 +462,6 @@ IOMMUMemoryRegion *smmu_iommu_mr(SMMUState *s, uint32_t sid)
return NULL;
}
-static guint smmu_iotlb_key_hash(gconstpointer v)
-{
- SMMUIOTLBKey *key = (SMMUIOTLBKey *)v;
- uint32_t a, b, c;
-
- /* Jenkins hash */
- a = b = c = JHASH_INITVAL + sizeof(*key);
- a += key->asid;
- b += extract64(key->iova, 0, 32);
- c += extract64(key->iova, 32, 32);
-
- __jhash_mix(a, b, c);
- __jhash_final(a, b, c);
-
- return c;
-}
-
-static gboolean smmu_iotlb_key_equal(gconstpointer v1, gconstpointer v2)
-{
- const SMMUIOTLBKey *k1 = v1;
- const SMMUIOTLBKey *k2 = v2;
-
- return (k1->asid == k2->asid) && (k1->iova == k2->iova);
-}
-
/* Unmap the whole notifier's range */
static void smmu_unmap_notifier_range(IOMMUNotifier *n)
{
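
The new smmu_iotlb_lookup() above keys the IOTLB on (asid, iova, tg, level) rather than on (asid, iova) alone, so a lookup probes each translation-table level from the table's starting level down to level 3, masking the IOVA to that level's subpage size before hashing. The standalone sketch below reproduces only that level and mask arithmetic, with hypothetical parameters (4KB granule, 48-bit input size); it is an illustration, not code from this patch.

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Illustration only: mirrors the level/mask arithmetic of the new
     * smmu_iotlb_lookup(), assuming a 4KB granule (granule_sz = 12) and a
     * 48-bit input address size (tsz = 16). The IOVA below is made up. */
    int main(void)
    {
        uint8_t granule_sz = 12;               /* log2(4KB) */
        uint8_t tsz = 16;                      /* 64 - 48-bit input size */
        uint8_t tg = (granule_sz - 10) / 2;    /* granule code stored in the key */
        uint8_t inputsize = 64 - tsz;
        uint8_t stride = granule_sz - 3;       /* 9 address bits resolved per level */
        uint8_t start_level = 4 - (inputsize - 4) / stride;
        uint64_t iova = 0x123456789000ULL;

        for (int level = start_level; level <= 3; level++) {
            /* level_shift(level, granule_sz) == granule_sz + (3 - level) * stride */
            uint64_t subpage_size = 1ULL << (granule_sz + (3 - level) * stride);
            uint64_t mask = subpage_size - 1;
            printf("probe key: tg=%d level=%d iova=0x%" PRIx64 "\n",
                   tg, level, iova & ~mask);
        }
        return 0;
    }
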
diff --git a/hw/arm/smmu-internal.h b/hw/arm/smmu-internal.h
index 7794d6d..55147f2 100644
--- a/hw/arm/smmu-internal.h
+++ b/hw/arm/smmu-internal.h
@@ -96,4 +96,12 @@ uint64_t iova_level_offset(uint64_t iova, int inputsize,
MAKE_64BIT_MASK(0, gsz - 3);
}
+#define SMMU_IOTLB_ASID(key) ((key).asid)
+
+typedef struct SMMUIOTLBPageInvInfo {
+ int asid;
+ uint64_t iova;
+ uint64_t mask;
+} SMMUIOTLBPageInvInfo;
+
#endif
diff --git a/hw/arm/smmuv3-internal.h b/hw/arm/smmuv3-internal.h
index 4112394..fa3c088 100644
--- a/hw/arm/smmuv3-internal.h
+++ b/hw/arm/smmuv3-internal.h
@@ -54,6 +54,8 @@ REG32(IDR1, 0x4)
REG32(IDR2, 0x8)
REG32(IDR3, 0xc)
+ FIELD(IDR3, HAD, 2, 1);
+ FIELD(IDR3, RIL, 10, 1);
REG32(IDR4, 0x10)
REG32(IDR5, 0x14)
FIELD(IDR5, OAS, 0, 3);
@@ -63,7 +65,8 @@ REG32(IDR5, 0x14)
#define SMMU_IDR5_OAS 4
-REG32(IIDR, 0x1c)
+REG32(IIDR, 0x18)
+REG32(AIDR, 0x1c)
REG32(CR0, 0x20)
FIELD(CR0, SMMU_ENABLE, 0, 1)
FIELD(CR0, EVENTQEN, 2, 1)
@@ -298,6 +301,8 @@ enum { /* Command completion notification */
};
#define CMD_TYPE(x) extract32((x)->word[0], 0 , 8)
+#define CMD_NUM(x) extract32((x)->word[0], 12 , 5)
+#define CMD_SCALE(x) extract32((x)->word[0], 20 , 5)
#define CMD_SSEC(x) extract32((x)->word[0], 10, 1)
#define CMD_SSV(x) extract32((x)->word[0], 11, 1)
#define CMD_RESUME_AC(x) extract32((x)->word[0], 12, 1)
@@ -310,6 +315,8 @@ enum { /* Command completion notification */
#define CMD_RESUME_STAG(x) extract32((x)->word[2], 0 , 16)
#define CMD_RESP(x) extract32((x)->word[2], 11, 2)
#define CMD_LEAF(x) extract32((x)->word[2], 0 , 1)
+#define CMD_TTL(x) extract32((x)->word[2], 8 , 2)
+#define CMD_TG(x) extract32((x)->word[2], 10, 2)
#define CMD_STE_RANGE(x) extract32((x)->word[2], 0 , 5)
#define CMD_ADDR(x) ({ \
uint64_t high = (uint64_t)(x)->word[3]; \
@@ -573,6 +580,7 @@ static inline int pa_range(STE *ste)
lo = (x)->word[(sel) * 2 + 2] & ~0xfULL; \
hi | lo; \
})
+#define CD_HAD(x, sel) extract32((x)->word[(sel) * 2 + 2], 1, 1)
#define CD_TSZ(x, sel) extract32((x)->word[0], (16 * (sel)) + 0, 6)
#define CD_TG(x, sel) extract32((x)->word[0], (16 * (sel)) + 6, 2)
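
The new CMD_NUM, CMD_SCALE, CMD_TG and CMD_TTL fields above carry the SMMUv3.2 range-invalidation parameters of a TLBI command. As a quick aid, the sketch below shows, with made-up field values and a local stand-in for QEMU's extract32(), how those fields combine into the number of pages and bytes covered by one ranged invalidation; the (num + 1) * (1 << scale) and tg-to-granule arithmetic follows what smmuv3_s1_range_inval() and smmu_iotlb_inv_iova() do in this series.

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Local stand-in for QEMU's extract32(); illustration only. */
    static uint32_t extract_bits(uint32_t value, int start, int length)
    {
        return (value >> start) & ((1u << length) - 1);
    }

    int main(void)
    {
        /* Hypothetical command words for a ranged CMD_TLBI_NH_VA. */
        uint32_t word0 = (3u << 12) | (2u << 20);   /* NUM = 3, SCALE = 2 */
        uint32_t word2 = (2u << 8)  | (1u << 10);   /* TTL = 2, TG = 1 (4KB) */

        uint32_t num   = extract_bits(word0, 12, 5);
        uint32_t scale = extract_bits(word0, 20, 5);
        uint32_t ttl   = extract_bits(word2, 8, 2);
        uint32_t tg    = extract_bits(word2, 10, 2);

        uint64_t num_pages = (num + 1) * (1ULL << scale); /* (3+1) * 4 = 16 pages */
        unsigned granule   = tg ? tg * 2 + 10 : 12;       /* TG=1 => 12 => 4KB pages */

        printf("num=%u scale=%u tg=%u ttl=%u => %" PRIu64 " pages, 0x%" PRIx64 " bytes\n",
               num, scale, tg, ttl, num_pages, num_pages << granule);
        return 0;
    }
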
diff --git a/hw/arm/smmuv3.c b/hw/arm/smmuv3.c
index 57a79df..0122700 100644
--- a/hw/arm/smmuv3.c
+++ b/hw/arm/smmuv3.c
@@ -254,6 +254,9 @@ static void smmuv3_init_regs(SMMUv3State *s)
s->idr[1] = FIELD_DP32(s->idr[1], IDR1, EVENTQS, SMMU_EVENTQS);
s->idr[1] = FIELD_DP32(s->idr[1], IDR1, CMDQS, SMMU_CMDQS);
+ s->idr[3] = FIELD_DP32(s->idr[3], IDR3, RIL, 1);
+ s->idr[3] = FIELD_DP32(s->idr[3], IDR3, HAD, 1);
+
/* 4K and 64K granule support */
s->idr[5] = FIELD_DP32(s->idr[5], IDR5, GRAN4K, 1);
s->idr[5] = FIELD_DP32(s->idr[5], IDR5, GRAN64K, 1);
@@ -270,6 +273,7 @@ static void smmuv3_init_regs(SMMUv3State *s)
s->features = 0;
s->sid_split = 0;
+ s->aidr = 0x1;
}
static int smmu_get_ste(SMMUv3State *s, dma_addr_t addr, STE *buf,
@@ -506,7 +510,8 @@ static int decode_cd(SMMUTransCfg *cfg, CD *cd, SMMUEventInfo *event)
if (tt->ttb & ~(MAKE_64BIT_MASK(0, cfg->oas))) {
goto bad_cd;
}
- trace_smmuv3_decode_cd_tt(i, tt->tsz, tt->ttb, tt->granule_sz);
+ tt->had = CD_HAD(cd, i);
+ trace_smmuv3_decode_cd_tt(i, tt->tsz, tt->ttb, tt->granule_sz, tt->had);
}
event->record_trans_faults = CD_R(cd);
@@ -626,7 +631,7 @@ static IOMMUTLBEntry smmuv3_translate(IOMMUMemoryRegion *mr, hwaddr addr,
SMMUTranslationStatus status;
SMMUState *bs = ARM_SMMU(s);
uint64_t page_mask, aligned_addr;
- IOMMUTLBEntry *cached_entry = NULL;
+ SMMUTLBEntry *cached_entry = NULL;
SMMUTransTableInfo *tt;
SMMUTransCfg *cfg = NULL;
IOMMUTLBEntry entry = {
@@ -636,7 +641,6 @@ static IOMMUTLBEntry smmuv3_translate(IOMMUMemoryRegion *mr, hwaddr addr,
.addr_mask = ~(hwaddr)0,
.perm = IOMMU_NONE,
};
- SMMUIOTLBKey key, *new_key;
qemu_mutex_lock(&s->mutex);
@@ -675,17 +679,9 @@ static IOMMUTLBEntry smmuv3_translate(IOMMUMemoryRegion *mr, hwaddr addr,
page_mask = (1ULL << (tt->granule_sz)) - 1;
aligned_addr = addr & ~page_mask;
- key.asid = cfg->asid;
- key.iova = aligned_addr;
-
- cached_entry = g_hash_table_lookup(bs->iotlb, &key);
+ cached_entry = smmu_iotlb_lookup(bs, cfg, tt, aligned_addr);
if (cached_entry) {
- cfg->iotlb_hits++;
- trace_smmu_iotlb_cache_hit(cfg->asid, aligned_addr,
- cfg->iotlb_hits, cfg->iotlb_misses,
- 100 * cfg->iotlb_hits /
- (cfg->iotlb_hits + cfg->iotlb_misses));
- if ((flag & IOMMU_WO) && !(cached_entry->perm & IOMMU_WO)) {
+ if ((flag & IOMMU_WO) && !(cached_entry->entry.perm & IOMMU_WO)) {
status = SMMU_TRANS_ERROR;
if (event.record_trans_faults) {
event.type = SMMU_EVT_F_PERMISSION;
@@ -698,17 +694,7 @@ static IOMMUTLBEntry smmuv3_translate(IOMMUMemoryRegion *mr, hwaddr addr,
goto epilogue;
}
- cfg->iotlb_misses++;
- trace_smmu_iotlb_cache_miss(cfg->asid, addr & ~page_mask,
- cfg->iotlb_hits, cfg->iotlb_misses,
- 100 * cfg->iotlb_hits /
- (cfg->iotlb_hits + cfg->iotlb_misses));
-
- if (g_hash_table_size(bs->iotlb) >= SMMU_IOTLB_MAX_SIZE) {
- smmu_iotlb_inv_all(bs);
- }
-
- cached_entry = g_new0(IOMMUTLBEntry, 1);
+ cached_entry = g_new0(SMMUTLBEntry, 1);
if (smmu_ptw(cfg, aligned_addr, flag, cached_entry, &ptw_info)) {
g_free(cached_entry);
@@ -753,10 +739,7 @@ static IOMMUTLBEntry smmuv3_translate(IOMMUMemoryRegion *mr, hwaddr addr,
}
status = SMMU_TRANS_ERROR;
} else {
- new_key = g_new0(SMMUIOTLBKey, 1);
- new_key->asid = cfg->asid;
- new_key->iova = aligned_addr;
- g_hash_table_insert(bs->iotlb, new_key, cached_entry);
+ smmu_iotlb_insert(bs, cfg, cached_entry);
status = SMMU_TRANS_SUCCESS;
}
@@ -765,9 +748,9 @@ epilogue:
switch (status) {
case SMMU_TRANS_SUCCESS:
entry.perm = flag;
- entry.translated_addr = cached_entry->translated_addr +
- (addr & page_mask);
- entry.addr_mask = cached_entry->addr_mask;
+ entry.translated_addr = cached_entry->entry.translated_addr +
+ (addr & cached_entry->entry.addr_mask);
+ entry.addr_mask = cached_entry->entry.addr_mask;
trace_smmuv3_translate_success(mr->parent_obj.name, sid, addr,
entry.translated_addr, entry.perm);
break;
@@ -807,42 +790,49 @@ epilogue:
* @n: notifier to be called
* @asid: address space ID or negative value if we don't care
* @iova: iova
+ * @tg: translation granule (if communicated through range invalidation)
+ * @num_pages: number of @granule sized pages (if tg != 0), otherwise 1
*/
static void smmuv3_notify_iova(IOMMUMemoryRegion *mr,
IOMMUNotifier *n,
- int asid,
- dma_addr_t iova)
+ int asid, dma_addr_t iova,
+ uint8_t tg, uint64_t num_pages)
{
SMMUDevice *sdev = container_of(mr, SMMUDevice, iommu);
- SMMUEventInfo event = {.inval_ste_allowed = true};
- SMMUTransTableInfo *tt;
- SMMUTransCfg *cfg;
IOMMUTLBEntry entry;
+ uint8_t granule = tg;
- cfg = smmuv3_get_config(sdev, &event);
- if (!cfg) {
- return;
- }
+ if (!tg) {
+ SMMUEventInfo event = {.inval_ste_allowed = true};
+ SMMUTransCfg *cfg = smmuv3_get_config(sdev, &event);
+ SMMUTransTableInfo *tt;
- if (asid >= 0 && cfg->asid != asid) {
- return;
- }
+ if (!cfg) {
+ return;
+ }
- tt = select_tt(cfg, iova);
- if (!tt) {
- return;
+ if (asid >= 0 && cfg->asid != asid) {
+ return;
+ }
+
+ tt = select_tt(cfg, iova);
+ if (!tt) {
+ return;
+ }
+ granule = tt->granule_sz;
}
entry.target_as = &address_space_memory;
entry.iova = iova;
- entry.addr_mask = (1 << tt->granule_sz) - 1;
+ entry.addr_mask = num_pages * (1 << granule) - 1;
entry.perm = IOMMU_NONE;
memory_region_notify_one(n, &entry);
}
-/* invalidate an asid/iova tuple in all mr's */
-static void smmuv3_inv_notifiers_iova(SMMUState *s, int asid, dma_addr_t iova)
+/* invalidate an asid/iova range tuple in all mr's */
+static void smmuv3_inv_notifiers_iova(SMMUState *s, int asid, dma_addr_t iova,
+ uint8_t tg, uint64_t num_pages)
{
SMMUDevice *sdev;
@@ -850,14 +840,41 @@ static void smmuv3_inv_notifiers_iova(SMMUState *s, int asid, dma_addr_t iova)
IOMMUMemoryRegion *mr = &sdev->iommu;
IOMMUNotifier *n;
- trace_smmuv3_inv_notifiers_iova(mr->parent_obj.name, asid, iova);
+ trace_smmuv3_inv_notifiers_iova(mr->parent_obj.name, asid, iova,
+ tg, num_pages);
IOMMU_NOTIFIER_FOREACH(n, mr) {
- smmuv3_notify_iova(mr, n, asid, iova);
+ smmuv3_notify_iova(mr, n, asid, iova, tg, num_pages);
}
}
}
+static void smmuv3_s1_range_inval(SMMUState *s, Cmd *cmd)
+{
+ uint8_t scale = 0, num = 0, ttl = 0;
+ dma_addr_t addr = CMD_ADDR(cmd);
+ uint8_t type = CMD_TYPE(cmd);
+ uint16_t vmid = CMD_VMID(cmd);
+ bool leaf = CMD_LEAF(cmd);
+ uint8_t tg = CMD_TG(cmd);
+ hwaddr num_pages = 1;
+ int asid = -1;
+
+ if (tg) {
+ scale = CMD_SCALE(cmd);
+ num = CMD_NUM(cmd);
+ ttl = CMD_TTL(cmd);
+ num_pages = (num + 1) * (1 << (scale));
+ }
+
+ if (type == SMMU_CMD_TLBI_NH_VA) {
+ asid = CMD_ASID(cmd);
+ }
+ trace_smmuv3_s1_range_inval(vmid, asid, addr, tg, num_pages, ttl, leaf);
+ smmuv3_inv_notifiers_iova(s, asid, addr, tg, num_pages);
+ smmu_iotlb_inv_iova(s, asid, addr, tg, num_pages, ttl);
+}
+
static int smmuv3_cmdq_consume(SMMUv3State *s)
{
SMMUState *bs = ARM_SMMU(s);
@@ -988,27 +1005,9 @@ static int smmuv3_cmdq_consume(SMMUv3State *s)
smmu_iotlb_inv_all(bs);
break;
case SMMU_CMD_TLBI_NH_VAA:
- {
- dma_addr_t addr = CMD_ADDR(&cmd);
- uint16_t vmid = CMD_VMID(&cmd);
-
- trace_smmuv3_cmdq_tlbi_nh_vaa(vmid, addr);
- smmuv3_inv_notifiers_iova(bs, -1, addr);
- smmu_iotlb_inv_all(bs);
- break;
- }
case SMMU_CMD_TLBI_NH_VA:
- {
- uint16_t asid = CMD_ASID(&cmd);
- uint16_t vmid = CMD_VMID(&cmd);
- dma_addr_t addr = CMD_ADDR(&cmd);
- bool leaf = CMD_LEAF(&cmd);
-
- trace_smmuv3_cmdq_tlbi_nh_va(vmid, asid, addr, leaf);
- smmuv3_inv_notifiers_iova(bs, asid, addr);
- smmu_iotlb_inv_iova(bs, asid, addr);
+ smmuv3_s1_range_inval(bs, &cmd);
break;
- }
case SMMU_CMD_TLBI_EL3_ALL:
case SMMU_CMD_TLBI_EL3_VA:
case SMMU_CMD_TLBI_EL2_ALL:
@@ -1257,6 +1256,9 @@ static MemTxResult smmu_readl(SMMUv3State *s, hwaddr offset,
case A_IIDR:
*data = s->iidr;
return MEMTX_OK;
+ case A_AIDR:
+ *data = s->aidr;
+ return MEMTX_OK;
case A_CR0:
*data = s->cr[0];
return MEMTX_OK;
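
When a ranged invalidation reaches the IOTLB via the smmuv3_s1_range_inval() path added above, entries that cannot be removed by a single key lookup are matched with the overlap test in smmu_hash_remove_by_asid_iova(). The sketch below restates that test on its own, with made-up values (a cached 2MB block entry and a 64KB invalidation range); it is an illustration of the predicate, not code from this patch.

    #include <inttypes.h>
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Same overlap test as smmu_hash_remove_by_asid_iova(): drop a cached
     * entry if the invalidated address falls inside the entry's range, or
     * if the entry's start address falls inside the invalidated range. */
    static bool entry_matches(uint64_t entry_iova, uint64_t entry_addr_mask,
                              uint64_t inv_iova, uint64_t inv_mask)
    {
        return ((inv_iova & ~entry_addr_mask) == entry_iova) ||
               ((entry_iova & ~inv_mask) == inv_iova);
    }

    int main(void)
    {
        /* Hypothetical values: 2MB block entry, 16 x 4KB invalidation range. */
        uint64_t entry_iova = 0x40200000, entry_mask = 0x1fffff;
        uint64_t inv_iova   = 0x40280000, inv_mask   = 0xffff;

        printf("entry 0x%" PRIx64 " dropped: %d\n", entry_iova,
               (int)entry_matches(entry_iova, entry_mask, inv_iova, inv_mask));
        return 0;
    }
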
diff --git a/hw/arm/trace-events b/hw/arm/trace-events
index 0acedce..c8a4d80 100644
--- a/hw/arm/trace-events
+++ b/hw/arm/trace-events
@@ -14,6 +14,9 @@ smmu_iotlb_inv_all(void) "IOTLB invalidate all"
smmu_iotlb_inv_asid(uint16_t asid) "IOTLB invalidate asid=%d"
smmu_iotlb_inv_iova(uint16_t asid, uint64_t addr) "IOTLB invalidate asid=%d addr=0x%"PRIx64
smmu_inv_notifiers_mr(const char *name) "iommu mr=%s"
+smmu_iotlb_lookup_hit(uint16_t asid, uint64_t addr, uint32_t hit, uint32_t miss, uint32_t p) "IOTLB cache HIT asid=%d addr=0x%"PRIx64" hit=%d miss=%d hit rate=%d"
+smmu_iotlb_lookup_miss(uint16_t asid, uint64_t addr, uint32_t hit, uint32_t miss, uint32_t p) "IOTLB cache MISS asid=%d addr=0x%"PRIx64" hit=%d miss=%d hit rate=%d"
+smmu_iotlb_insert(uint16_t asid, uint64_t addr, uint8_t tg, uint8_t level) "IOTLB ++ asid=%d addr=0x%"PRIx64" tg=%d level=%d"
# smmuv3.c
smmuv3_read_mmio(uint64_t addr, uint64_t val, unsigned size, uint32_t r) "addr: 0x%"PRIx64" val:0x%"PRIx64" size: 0x%x(%d)"
@@ -36,20 +39,17 @@ smmuv3_translate_abort(const char *n, uint16_t sid, uint64_t addr, bool is_write
smmuv3_translate_success(const char *n, uint16_t sid, uint64_t iova, uint64_t translated, int perm) "%s sid=%d iova=0x%"PRIx64" translated=0x%"PRIx64" perm=0x%x"
smmuv3_get_cd(uint64_t addr) "CD addr: 0x%"PRIx64
smmuv3_decode_cd(uint32_t oas) "oas=%d"
-smmuv3_decode_cd_tt(int i, uint32_t tsz, uint64_t ttb, uint32_t granule_sz) "TT[%d]:tsz:%d ttb:0x%"PRIx64" granule_sz:%d"
+smmuv3_decode_cd_tt(int i, uint32_t tsz, uint64_t ttb, uint32_t granule_sz, bool had) "TT[%d]:tsz:%d ttb:0x%"PRIx64" granule_sz:%d had:%d"
smmuv3_cmdq_cfgi_ste(int streamid) "streamid =%d"
smmuv3_cmdq_cfgi_ste_range(int start, int end) "start=0x%d - end=0x%d"
smmuv3_cmdq_cfgi_cd(uint32_t sid) "streamid = %d"
smmuv3_config_cache_hit(uint32_t sid, uint32_t hits, uint32_t misses, uint32_t perc) "Config cache HIT for sid %d (hits=%d, misses=%d, hit rate=%d)"
smmuv3_config_cache_miss(uint32_t sid, uint32_t hits, uint32_t misses, uint32_t perc) "Config cache MISS for sid %d (hits=%d, misses=%d, hit rate=%d)"
-smmuv3_cmdq_tlbi_nh_va(int vmid, int asid, uint64_t addr, bool leaf) "vmid =%d asid =%d addr=0x%"PRIx64" leaf=%d"
-smmuv3_cmdq_tlbi_nh_vaa(int vmid, uint64_t addr) "vmid =%d addr=0x%"PRIx64
+smmuv3_s1_range_inval(int vmid, int asid, uint64_t addr, uint8_t tg, uint64_t num_pages, uint8_t ttl, bool leaf) "vmid =%d asid =%d addr=0x%"PRIx64" tg=%d num_pages=0x%"PRIx64" ttl=%d leaf=%d"
smmuv3_cmdq_tlbi_nh(void) ""
smmuv3_cmdq_tlbi_nh_asid(uint16_t asid) "asid=%d"
-smmu_iotlb_cache_hit(uint16_t asid, uint64_t addr, uint32_t hit, uint32_t miss, uint32_t p) "IOTLB cache HIT asid=%d addr=0x%"PRIx64" hit=%d miss=%d hit rate=%d"
-smmu_iotlb_cache_miss(uint16_t asid, uint64_t addr, uint32_t hit, uint32_t miss, uint32_t p) "IOTLB cache MISS asid=%d addr=0x%"PRIx64" hit=%d miss=%d hit rate=%d"
smmuv3_config_cache_inv(uint32_t sid) "Config cache INV for sid %d"
smmuv3_notify_flag_add(const char *iommu) "ADD SMMUNotifier node for iommu mr=%s"
smmuv3_notify_flag_del(const char *iommu) "DEL SMMUNotifier node for iommu mr=%s"
-smmuv3_inv_notifiers_iova(const char *name, uint16_t asid, uint64_t iova) "iommu mr=%s asid=%d iova=0x%"PRIx64
+smmuv3_inv_notifiers_iova(const char *name, uint16_t asid, uint64_t iova, uint8_t tg, uint64_t num_pages) "iommu mr=%s asid=%d iova=0x%"PRIx64" tg=%d num_pages=0x%"PRIx64
diff --git a/hw/cpu/a9mpcore.c b/hw/cpu/a9mpcore.c
index 351295e..ec186d4 100644
--- a/hw/cpu/a9mpcore.c
+++ b/hw/cpu/a9mpcore.c
@@ -15,6 +15,7 @@
#include "hw/irq.h"
#include "hw/qdev-properties.h"
#include "hw/core/cpu.h"
+#include "cpu.h"
#define A9_GIC_NUM_PRIORITY_BITS 5
@@ -52,8 +53,18 @@ static void a9mp_priv_realize(DeviceState *dev, Error **errp)
*wdtbusdev;
int i;
bool has_el3;
+ CPUState *cpu0;
Object *cpuobj;
+ cpu0 = qemu_get_cpu(0);
+ cpuobj = OBJECT(cpu0);
+ if (strcmp(object_get_typename(cpuobj), ARM_CPU_TYPE_NAME("cortex-a9"))) {
+ /* We might allow Cortex-A5 once we model it */
+ error_setg(errp,
+ "Cortex-A9MPCore peripheral can only use Cortex-A9 CPU");
+ return;
+ }
+
scudev = DEVICE(&s->scu);
qdev_prop_set_uint32(scudev, "num-cpu", s->num_cpu);
if (!sysbus_realize(SYS_BUS_DEVICE(&s->scu), errp)) {
@@ -70,7 +81,6 @@ static void a9mp_priv_realize(DeviceState *dev, Error **errp)
/* Make the GIC's TZ support match the CPUs. We assume that
* either all the CPUs have TZ, or none do.
*/
- cpuobj = OBJECT(qemu_get_cpu(0));
has_el3 = object_property_find(cpuobj, "has_el3", NULL) &&
object_property_get_bool(cpuobj, "has_el3", &error_abort);
qdev_prop_set_bit(gicdev, "has-security-extensions", has_el3);