Diffstat (limited to 'hw/arm/smmuv3.c')
-rw-r--r--   hw/arm/smmuv3.c   418
1 file changed, 382 insertions, 36 deletions
diff --git a/hw/arm/smmuv3.c b/hw/arm/smmuv3.c
index 9783309..39fbcbf 100644
--- a/hw/arm/smmuv3.c
+++ b/hw/arm/smmuv3.c
@@ -23,6 +23,7 @@
 #include "hw/qdev-core.h"
 #include "hw/pci/pci.h"
 #include "exec/address-spaces.h"
+#include "cpu.h"
 #include "trace.h"
 #include "qemu/log.h"
 #include "qemu/error-report.h"
@@ -154,7 +155,7 @@ void smmuv3_record_event(SMMUv3State *s, SMMUEventInfo *info)
     EVT_SET_SID(&evt, info->sid);
 
     switch (info->type) {
-    case SMMU_EVT_OK:
+    case SMMU_EVT_NONE:
         return;
     case SMMU_EVT_F_UUT:
         EVT_SET_SSID(&evt, info->u.f_uut.ssid);
@@ -312,12 +313,11 @@ static int smmu_get_cd(SMMUv3State *s, STE *ste, uint32_t ssid,
     return 0;
 }
 
-/* Returns <0 if the caller has no need to continue the translation */
+/* Returns < 0 in case of invalid STE, 0 otherwise */
 static int decode_ste(SMMUv3State *s, SMMUTransCfg *cfg,
                       STE *ste, SMMUEventInfo *event)
 {
     uint32_t config;
-    int ret = -EINVAL;
 
     if (!STE_VALID(ste)) {
         goto bad_ste;
@@ -326,13 +326,13 @@ static int decode_ste(SMMUv3State *s, SMMUTransCfg *cfg,
     config = STE_CONFIG(ste);
 
     if (STE_CFG_ABORT(config)) {
-        cfg->aborted = true; /* abort but don't record any event */
-        return ret;
+        cfg->aborted = true;
+        return 0;
     }
 
     if (STE_CFG_BYPASS(config)) {
         cfg->bypassed = true;
-        return ret;
+        return 0;
     }
 
     if (STE_CFG_S2_ENABLED(config)) {
@@ -509,7 +509,7 @@ bad_cd:
  * the different configuration decoding steps
  * @event: must be zero'ed by the caller
  *
- * return < 0 if the translation needs to be aborted (@event is filled
+ * return < 0 in case of config decoding error (@event is filled
  * accordingly). Return 0 otherwise.
  */
 static int smmuv3_decode_config(IOMMUMemoryRegion *mr, SMMUTransCfg *cfg,
@@ -518,34 +518,98 @@ static int smmuv3_decode_config(IOMMUMemoryRegion *mr, SMMUTransCfg *cfg,
     SMMUDevice *sdev = container_of(mr, SMMUDevice, iommu);
     uint32_t sid = smmu_get_sid(sdev);
     SMMUv3State *s = sdev->smmu;
-    int ret = -EINVAL;
+    int ret;
     STE ste;
     CD cd;
 
-    if (smmu_find_ste(s, sid, &ste, event)) {
+    ret = smmu_find_ste(s, sid, &ste, event);
+    if (ret) {
         return ret;
     }
 
-    if (decode_ste(s, cfg, &ste, event)) {
+    ret = decode_ste(s, cfg, &ste, event);
+    if (ret) {
         return ret;
     }
 
-    if (smmu_get_cd(s, &ste, 0 /* ssid */, &cd, event)) {
+    if (cfg->aborted || cfg->bypassed) {
+        return 0;
+    }
+
+    ret = smmu_get_cd(s, &ste, 0 /* ssid */, &cd, event);
+    if (ret) {
         return ret;
     }
 
     return decode_cd(cfg, &cd, event);
 }
 
+/**
+ * smmuv3_get_config - Look up for a cached copy of configuration data for
+ * @sdev and on cache miss performs a configuration structure decoding from
+ * guest RAM.
+ *
+ * @sdev: SMMUDevice handle
+ * @event: output event info
+ *
+ * The configuration cache contains data resulting from both STE and CD
+ * decoding under the form of an SMMUTransCfg struct. The hash table is indexed
+ * by the SMMUDevice handle.
+ */
+static SMMUTransCfg *smmuv3_get_config(SMMUDevice *sdev, SMMUEventInfo *event)
+{
+    SMMUv3State *s = sdev->smmu;
+    SMMUState *bc = &s->smmu_state;
+    SMMUTransCfg *cfg;
+
+    cfg = g_hash_table_lookup(bc->configs, sdev);
+    if (cfg) {
+        sdev->cfg_cache_hits++;
+        trace_smmuv3_config_cache_hit(smmu_get_sid(sdev),
+                                      sdev->cfg_cache_hits, sdev->cfg_cache_misses,
+                                      100 * sdev->cfg_cache_hits /
+                                      (sdev->cfg_cache_hits + sdev->cfg_cache_misses));
+    } else {
+        sdev->cfg_cache_misses++;
+        trace_smmuv3_config_cache_miss(smmu_get_sid(sdev),
+                                       sdev->cfg_cache_hits, sdev->cfg_cache_misses,
+                                       100 * sdev->cfg_cache_hits /
+                                       (sdev->cfg_cache_hits + sdev->cfg_cache_misses));
+        cfg = g_new0(SMMUTransCfg, 1);
+
+        if (!smmuv3_decode_config(&sdev->iommu, cfg, event)) {
+            g_hash_table_insert(bc->configs, sdev, cfg);
+        } else {
+            g_free(cfg);
+            cfg = NULL;
+        }
+    }
+    return cfg;
+}
+
+static void smmuv3_flush_config(SMMUDevice *sdev)
+{
+    SMMUv3State *s = sdev->smmu;
+    SMMUState *bc = &s->smmu_state;
+
+    trace_smmuv3_config_cache_inv(smmu_get_sid(sdev));
+    g_hash_table_remove(bc->configs, sdev);
+}
+
 static IOMMUTLBEntry smmuv3_translate(IOMMUMemoryRegion *mr, hwaddr addr,
                                       IOMMUAccessFlags flag, int iommu_idx)
 {
     SMMUDevice *sdev = container_of(mr, SMMUDevice, iommu);
     SMMUv3State *s = sdev->smmu;
     uint32_t sid = smmu_get_sid(sdev);
-    SMMUEventInfo event = {.type = SMMU_EVT_OK, .sid = sid};
+    SMMUEventInfo event = {.type = SMMU_EVT_NONE, .sid = sid};
     SMMUPTWEventInfo ptw_info = {};
-    SMMUTransCfg cfg = {};
+    SMMUTranslationStatus status;
+    SMMUState *bs = ARM_SMMU(s);
+    uint64_t page_mask, aligned_addr;
+    IOMMUTLBEntry *cached_entry = NULL;
+    SMMUTransTableInfo *tt;
+    SMMUTransCfg *cfg = NULL;
     IOMMUTLBEntry entry = {
         .target_as = &address_space_memory,
         .iova = addr,
@@ -553,23 +617,82 @@ static IOMMUTLBEntry smmuv3_translate(IOMMUMemoryRegion *mr, hwaddr addr,
         .addr_mask = ~(hwaddr)0,
         .perm = IOMMU_NONE,
     };
-    int ret = 0;
+    SMMUIOTLBKey key, *new_key;
+
+    qemu_mutex_lock(&s->mutex);
 
     if (!smmu_enabled(s)) {
-        goto out;
+        status = SMMU_TRANS_DISABLE;
+        goto epilogue;
     }
 
-    ret = smmuv3_decode_config(mr, &cfg, &event);
-    if (ret) {
-        goto out;
+    cfg = smmuv3_get_config(sdev, &event);
+    if (!cfg) {
+        status = SMMU_TRANS_ERROR;
+        goto epilogue;
     }
 
-    if (cfg.aborted) {
-        goto out;
+    if (cfg->aborted) {
+        status = SMMU_TRANS_ABORT;
+        goto epilogue;
     }
 
-    ret = smmu_ptw(&cfg, addr, flag, &entry, &ptw_info);
-    if (ret) {
+    if (cfg->bypassed) {
+        status = SMMU_TRANS_BYPASS;
+        goto epilogue;
+    }
+
+    tt = select_tt(cfg, addr);
+    if (!tt) {
+        if (event.record_trans_faults) {
+            event.type = SMMU_EVT_F_TRANSLATION;
+            event.u.f_translation.addr = addr;
+            event.u.f_translation.rnw = flag & 0x1;
+        }
+        status = SMMU_TRANS_ERROR;
+        goto epilogue;
+    }
+
+    page_mask = (1ULL << (tt->granule_sz)) - 1;
+    aligned_addr = addr & ~page_mask;
+
+    key.asid = cfg->asid;
+    key.iova = aligned_addr;
+
+    cached_entry = g_hash_table_lookup(bs->iotlb, &key);
+    if (cached_entry) {
+        cfg->iotlb_hits++;
+        trace_smmu_iotlb_cache_hit(cfg->asid, aligned_addr,
+                                   cfg->iotlb_hits, cfg->iotlb_misses,
+                                   100 * cfg->iotlb_hits /
+                                   (cfg->iotlb_hits + cfg->iotlb_misses));
+        if ((flag & IOMMU_WO) && !(cached_entry->perm & IOMMU_WO)) {
+            status = SMMU_TRANS_ERROR;
+            if (event.record_trans_faults) {
+                event.type = SMMU_EVT_F_PERMISSION;
+                event.u.f_permission.addr = addr;
+                event.u.f_permission.rnw = flag & 0x1;
+            }
+        } else {
+            status = SMMU_TRANS_SUCCESS;
+        }
+        goto epilogue;
+    }
+
+    cfg->iotlb_misses++;
+    trace_smmu_iotlb_cache_miss(cfg->asid, addr & ~page_mask,
+                                cfg->iotlb_hits, cfg->iotlb_misses,
+                                100 * cfg->iotlb_hits /
+                                (cfg->iotlb_hits + cfg->iotlb_misses));
+
+    if (g_hash_table_size(bs->iotlb) >= SMMU_IOTLB_MAX_SIZE) {
+        smmu_iotlb_inv_all(bs);
+    }
+
+    cached_entry = g_new0(IOMMUTLBEntry, 1);
+
+    if (smmu_ptw(cfg, aligned_addr, flag, cached_entry, &ptw_info)) {
+        g_free(cached_entry);
         switch (ptw_info.type) {
         case SMMU_PTW_ERR_WALK_EABT:
             event.type = SMMU_EVT_F_WALK_EABT;
@@ -609,25 +732,119 @@ static IOMMUTLBEntry smmuv3_translate(IOMMUMemoryRegion *mr, hwaddr addr,
         default:
             g_assert_not_reached();
         }
+        status = SMMU_TRANS_ERROR;
+    } else {
+        new_key = g_new0(SMMUIOTLBKey, 1);
+        new_key->asid = cfg->asid;
+        new_key->iova = aligned_addr;
+        g_hash_table_insert(bs->iotlb, new_key, cached_entry);
+        status = SMMU_TRANS_SUCCESS;
     }
-out:
-    if (ret) {
+
+epilogue:
+    qemu_mutex_unlock(&s->mutex);
+    switch (status) {
+    case SMMU_TRANS_SUCCESS:
+        entry.perm = flag;
+        entry.translated_addr = cached_entry->translated_addr +
+                                (addr & page_mask);
+        entry.addr_mask = cached_entry->addr_mask;
+        trace_smmuv3_translate_success(mr->parent_obj.name, sid, addr,
+                                       entry.translated_addr, entry.perm);
+        break;
+    case SMMU_TRANS_DISABLE:
+        entry.perm = flag;
+        entry.addr_mask = ~TARGET_PAGE_MASK;
+        trace_smmuv3_translate_disable(mr->parent_obj.name, sid, addr,
+                                       entry.perm);
+        break;
+    case SMMU_TRANS_BYPASS:
+        entry.perm = flag;
+        entry.addr_mask = ~TARGET_PAGE_MASK;
+        trace_smmuv3_translate_bypass(mr->parent_obj.name, sid, addr,
+                                      entry.perm);
+        break;
+    case SMMU_TRANS_ABORT:
+        /* no event is recorded on abort */
+        trace_smmuv3_translate_abort(mr->parent_obj.name, sid, addr,
+                                     entry.perm);
+        break;
+    case SMMU_TRANS_ERROR:
         qemu_log_mask(LOG_GUEST_ERROR,
-                      "%s translation failed for iova=0x%"PRIx64"(%d)\n",
-                      mr->parent_obj.name, addr, ret);
-        entry.perm = IOMMU_NONE;
+                      "%s translation failed for iova=0x%"PRIx64"(%s)\n",
+                      mr->parent_obj.name, addr, smmu_event_string(event.type));
         smmuv3_record_event(s, &event);
-    } else if (!cfg.aborted) {
-        entry.perm = flag;
-        trace_smmuv3_translate(mr->parent_obj.name, sid, addr,
-                               entry.translated_addr, entry.perm);
+        break;
     }
 
     return entry;
 }
 
+/**
+ * smmuv3_notify_iova - call the notifier @n for a given
+ * @asid and @iova tuple.
+ *
+ * @mr: IOMMU mr region handle
+ * @n: notifier to be called
+ * @asid: address space ID or negative value if we don't care
+ * @iova: iova
+ */
+static void smmuv3_notify_iova(IOMMUMemoryRegion *mr,
+                               IOMMUNotifier *n,
+                               int asid,
+                               dma_addr_t iova)
+{
+    SMMUDevice *sdev = container_of(mr, SMMUDevice, iommu);
+    SMMUEventInfo event = {};
+    SMMUTransTableInfo *tt;
+    SMMUTransCfg *cfg;
+    IOMMUTLBEntry entry;
+
+    cfg = smmuv3_get_config(sdev, &event);
+    if (!cfg) {
+        qemu_log_mask(LOG_GUEST_ERROR,
+                      "%s error decoding the configuration for iommu mr=%s\n",
+                      __func__, mr->parent_obj.name);
+        return;
+    }
+
+    if (asid >= 0 && cfg->asid != asid) {
+        return;
+    }
+
+    tt = select_tt(cfg, iova);
+    if (!tt) {
+        return;
+    }
+
+    entry.target_as = &address_space_memory;
+    entry.iova = iova;
+    entry.addr_mask = (1 << tt->granule_sz) - 1;
+    entry.perm = IOMMU_NONE;
+
+    memory_region_notify_one(n, &entry);
+}
+
+/* invalidate an asid/iova tuple in all mr's */
+static void smmuv3_inv_notifiers_iova(SMMUState *s, int asid, dma_addr_t iova)
+{
+    SMMUNotifierNode *node;
+
+    QLIST_FOREACH(node, &s->notifiers_list, next) {
+        IOMMUMemoryRegion *mr = &node->sdev->iommu;
+        IOMMUNotifier *n;
+
+        trace_smmuv3_inv_notifiers_iova(mr->parent_obj.name, asid, iova);
+
+        IOMMU_NOTIFIER_FOREACH(n, mr) {
+            smmuv3_notify_iova(mr, n, asid, iova);
+        }
+    }
+}
+
 static int smmuv3_cmdq_consume(SMMUv3State *s)
 {
+    SMMUState *bs = ARM_SMMU(s);
     SMMUCmdError cmd_error = SMMU_CERROR_NONE;
     SMMUQueue *q = &s->cmdq;
     SMMUCommandType type = 0;
@@ -662,6 +879,7 @@ static int smmuv3_cmdq_consume(SMMUv3State *s)
 
         trace_smmuv3_cmdq_opcode(smmu_cmd_string(type));
 
+        qemu_mutex_lock(&s->mutex);
         switch (type) {
         case SMMU_CMD_SYNC:
             if (CMD_SYNC_CS(&cmd) & CMD_SYNC_SIG_IRQ) {
@@ -670,14 +888,111 @@ static int smmuv3_cmdq_consume(SMMUv3State *s)
             break;
         case SMMU_CMD_PREFETCH_CONFIG:
         case SMMU_CMD_PREFETCH_ADDR:
+            break;
         case SMMU_CMD_CFGI_STE:
+        {
+            uint32_t sid = CMD_SID(&cmd);
+            IOMMUMemoryRegion *mr = smmu_iommu_mr(bs, sid);
+            SMMUDevice *sdev;
+
+            if (CMD_SSEC(&cmd)) {
+                cmd_error = SMMU_CERROR_ILL;
+                break;
+            }
+
+            if (!mr) {
+                break;
+            }
+
+            trace_smmuv3_cmdq_cfgi_ste(sid);
+            sdev = container_of(mr, SMMUDevice, iommu);
+            smmuv3_flush_config(sdev);
+
+            break;
+        }
         case SMMU_CMD_CFGI_STE_RANGE: /* same as SMMU_CMD_CFGI_ALL */
+        {
+            uint32_t start = CMD_SID(&cmd), end, i;
+            uint8_t range = CMD_STE_RANGE(&cmd);
+
+            if (CMD_SSEC(&cmd)) {
+                cmd_error = SMMU_CERROR_ILL;
+                break;
+            }
+
+            end = start + (1 << (range + 1)) - 1;
+            trace_smmuv3_cmdq_cfgi_ste_range(start, end);
+
+            for (i = start; i <= end; i++) {
+                IOMMUMemoryRegion *mr = smmu_iommu_mr(bs, i);
+                SMMUDevice *sdev;
+
+                if (!mr) {
+                    continue;
+                }
+                sdev = container_of(mr, SMMUDevice, iommu);
+                smmuv3_flush_config(sdev);
+            }
+            break;
+        }
         case SMMU_CMD_CFGI_CD:
         case SMMU_CMD_CFGI_CD_ALL:
-        case SMMU_CMD_TLBI_NH_ALL:
+        {
+            uint32_t sid = CMD_SID(&cmd);
+            IOMMUMemoryRegion *mr = smmu_iommu_mr(bs, sid);
+            SMMUDevice *sdev;
+
+            if (CMD_SSEC(&cmd)) {
+                cmd_error = SMMU_CERROR_ILL;
+                break;
+            }
+
+            if (!mr) {
+                break;
+            }
+
+            trace_smmuv3_cmdq_cfgi_cd(sid);
+            sdev = container_of(mr, SMMUDevice, iommu);
+            smmuv3_flush_config(sdev);
+            break;
+        }
         case SMMU_CMD_TLBI_NH_ASID:
-        case SMMU_CMD_TLBI_NH_VA:
+        {
+            uint16_t asid = CMD_ASID(&cmd);
+
+            trace_smmuv3_cmdq_tlbi_nh_asid(asid);
+            smmu_inv_notifiers_all(&s->smmu_state);
+            smmu_iotlb_inv_asid(bs, asid);
+            break;
+        }
+        case SMMU_CMD_TLBI_NH_ALL:
+        case SMMU_CMD_TLBI_NSNH_ALL:
+            trace_smmuv3_cmdq_tlbi_nh();
+            smmu_inv_notifiers_all(&s->smmu_state);
+            smmu_iotlb_inv_all(bs);
+            break;
         case SMMU_CMD_TLBI_NH_VAA:
+        {
+            dma_addr_t addr = CMD_ADDR(&cmd);
+            uint16_t vmid = CMD_VMID(&cmd);
+
+            trace_smmuv3_cmdq_tlbi_nh_vaa(vmid, addr);
+            smmuv3_inv_notifiers_iova(bs, -1, addr);
+            smmu_iotlb_inv_all(bs);
+            break;
+        }
+        case SMMU_CMD_TLBI_NH_VA:
+        {
+            uint16_t asid = CMD_ASID(&cmd);
+            uint16_t vmid = CMD_VMID(&cmd);
+            dma_addr_t addr = CMD_ADDR(&cmd);
+            bool leaf = CMD_LEAF(&cmd);
+
+            trace_smmuv3_cmdq_tlbi_nh_va(vmid, asid, addr, leaf);
+            smmuv3_inv_notifiers_iova(bs, asid, addr);
+            smmu_iotlb_inv_iova(bs, asid, addr);
+            break;
+        }
         case SMMU_CMD_TLBI_EL3_ALL:
         case SMMU_CMD_TLBI_EL3_VA:
        case SMMU_CMD_TLBI_EL2_ALL:
@@ -686,7 +1001,6 @@ static int smmuv3_cmdq_consume(SMMUv3State *s)
         case SMMU_CMD_TLBI_EL2_VAA:
         case SMMU_CMD_TLBI_S12_VMALL:
         case SMMU_CMD_TLBI_S2_IPA:
-        case SMMU_CMD_TLBI_NSNH_ALL:
         case SMMU_CMD_ATC_INV:
         case SMMU_CMD_PRI_RESP:
         case SMMU_CMD_RESUME:
@@ -699,6 +1013,7 @@ static int smmuv3_cmdq_consume(SMMUv3State *s)
                           "Illegal command type: %d\n", CMD_TYPE(&cmd));
             break;
         }
+        qemu_mutex_unlock(&s->mutex);
         if (cmd_error) {
             break;
         }
@@ -1078,6 +1393,8 @@ static void smmu_realize(DeviceState *d, Error **errp)
         return;
     }
 
+    qemu_mutex_init(&s->mutex);
+
     memory_region_init_io(&sys->iomem, OBJECT(s), &smmu_mem_ops, sys,
                           TYPE_ARM_SMMUV3, 0x20000);
@@ -1151,9 +1468,38 @@ static void smmuv3_notify_flag_changed(IOMMUMemoryRegion *iommu,
                                        IOMMUNotifierFlag old,
                                        IOMMUNotifierFlag new)
 {
+    SMMUDevice *sdev = container_of(iommu, SMMUDevice, iommu);
+    SMMUv3State *s3 = sdev->smmu;
+    SMMUState *s = &(s3->smmu_state);
+    SMMUNotifierNode *node = NULL;
+    SMMUNotifierNode *next_node = NULL;
+
+    if (new & IOMMU_NOTIFIER_MAP) {
+        int bus_num = pci_bus_num(sdev->bus);
+        PCIDevice *pcidev = pci_find_device(sdev->bus, bus_num, sdev->devfn);
+
+        warn_report("SMMUv3 does not support notification on MAP: "
+                    "device %s will not function properly", pcidev->name);
+    }
+
     if (old == IOMMU_NOTIFIER_NONE) {
-        warn_report("SMMUV3 does not support vhost/vfio integration yet: "
-                    "devices of those types will not function properly");
+        trace_smmuv3_notify_flag_add(iommu->parent_obj.name);
+        node = g_malloc0(sizeof(*node));
+        node->sdev = sdev;
+        QLIST_INSERT_HEAD(&s->notifiers_list, node, next);
+        return;
+    }
+
+    /* update notifier node with new flags */
+    QLIST_FOREACH_SAFE(node, &s->notifiers_list, next, next_node) {
+        if (node->sdev == sdev) {
+            if (new == IOMMU_NOTIFIER_NONE) {
+                trace_smmuv3_notify_flag_del(iommu->parent_obj.name);
+                QLIST_REMOVE(node, next);
+                g_free(node);
+            }
+            return;
+        }
     }
 }
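
Note: the smmuv3_get_config()/smmuv3_flush_config() pair added above is a lookup-or-decode cache: the SMMUTransCfg is looked up in a GHashTable keyed by the SMMUDevice pointer, decoded and inserted on a miss, and removed again when a CFGI command invalidates it. Below is a minimal standalone sketch of that pattern, assuming only GLib; DeviceCfg, decode_cfg and the small main() driver are illustrative names, not part of the patch.

/* Sketch of a per-device config cache: look up by device pointer,
 * decode and insert on miss, remove on invalidation. */
#include <glib.h>
#include <stdio.h>

typedef struct DeviceCfg { int asid; gboolean bypassed; } DeviceCfg;

/* stand-in for smmuv3_decode_config(); returns 0 on success */
static int decode_cfg(void *dev, DeviceCfg *cfg)
{
    (void)dev;
    cfg->asid = 1;
    cfg->bypassed = FALSE;
    return 0;
}

static DeviceCfg *get_config(GHashTable *configs, void *dev)
{
    DeviceCfg *cfg = g_hash_table_lookup(configs, dev);

    if (cfg) {
        return cfg;                    /* cache hit */
    }
    cfg = g_new0(DeviceCfg, 1);        /* cache miss: decode from scratch */
    if (decode_cfg(dev, cfg)) {
        g_free(cfg);
        return NULL;                   /* decoding error: nothing is cached */
    }
    g_hash_table_insert(configs, dev, cfg);
    return cfg;
}

static void flush_config(GHashTable *configs, void *dev)
{
    g_hash_table_remove(configs, dev); /* e.g. on a CFGI_STE command */
}

int main(void)
{
    /* NULL hash/equal funcs mean direct pointer hashing; the value
     * destroy func frees the cached entry on remove/invalidate */
    GHashTable *configs = g_hash_table_new_full(NULL, NULL, NULL, g_free);
    int dummy_dev;
    DeviceCfg *cfg = get_config(configs, &dummy_dev);

    printf("asid=%d\n", cfg ? cfg->asid : -1);
    flush_config(configs, &dummy_dev);
    g_hash_table_destroy(configs);
    return 0;
}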
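The IOTLB that smmuv3_translate() now consults is keyed on an (asid, iova) pair and is flushed in full once it holds SMMU_IOTLB_MAX_SIZE entries. The following is a hedged sketch of such a composite-key GHashTable; tlb_key_hash, tlb_key_equal and TLB_MAX_SIZE are illustrative stand-ins for the helpers the patch takes from the common SMMU code, not the actual QEMU API.

/* Sketch of an (asid, iova) keyed TLB with a crude size cap,
 * mirroring the lookup / insert / invalidate-all flow in the patch. */
#include <glib.h>
#include <stdint.h>
#include <stdio.h>

#define TLB_MAX_SIZE 8          /* stand-in for SMMU_IOTLB_MAX_SIZE */

typedef struct TLBKey { uint16_t asid; uint64_t iova; } TLBKey;
typedef struct TLBEntry { uint64_t pa; unsigned perm; } TLBEntry;

static guint tlb_key_hash(gconstpointer k)
{
    const TLBKey *key = k;
    return g_int64_hash(&key->iova) ^ key->asid;
}

static gboolean tlb_key_equal(gconstpointer a, gconstpointer b)
{
    const TLBKey *ka = a, *kb = b;
    return ka->asid == kb->asid && ka->iova == kb->iova;
}

static TLBEntry *tlb_lookup(GHashTable *tlb, uint16_t asid, uint64_t iova)
{
    TLBKey key = { .asid = asid, .iova = iova };   /* stack key, as in the patch */
    return g_hash_table_lookup(tlb, &key);
}

static void tlb_insert(GHashTable *tlb, uint16_t asid, uint64_t iova,
                       TLBEntry *entry)
{
    TLBKey *key = g_new0(TLBKey, 1);   /* heap key, owned by the table */

    if (g_hash_table_size(tlb) >= TLB_MAX_SIZE) {
        g_hash_table_remove_all(tlb);  /* "invalidate all" on overflow */
    }
    key->asid = asid;
    key->iova = iova;
    g_hash_table_insert(tlb, key, entry);
}

int main(void)
{
    GHashTable *tlb = g_hash_table_new_full(tlb_key_hash, tlb_key_equal,
                                            g_free, g_free);
    TLBEntry *e = g_new0(TLBEntry, 1);

    e->pa = 0x80000000ULL;
    tlb_insert(tlb, 1, 0x1000, e);
    printf("hit=%d\n", tlb_lookup(tlb, 1, 0x1000) != NULL);
    g_hash_table_destroy(tlb);
    return 0;
}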