-rw-r--r--  MAINTAINERS                          2
-rw-r--r--  accel/kvm/kvm-all.c                239
-rwxr-xr-x  configure                            2
-rw-r--r--  crypto/block-luks.c               1025
-rw-r--r--  hw/arm/boot.c                       12
-rw-r--r--  hw/s390x/event-facility.c            3
-rw-r--r--  hw/s390x/s390-pci-bus.c              7
-rw-r--r--  hw/s390x/s390-virtio-ccw.c          30
-rw-r--r--  hw/s390x/sclp.c                     37
-rw-r--r--  include/sysemu/kvm_int.h             1
-rw-r--r--  linux-user/arm/cpu_loop.c            3
-rw-r--r--  linux-user/arm/target_syscall.h      3
-rw-r--r--  target/arm/helper.c                115
-rw-r--r--  target/arm/m_helper.c               18
-rw-r--r--  target/arm/translate.c              30
-rw-r--r--  target/s390x/kvm.c                  10
-rw-r--r--  tests/tcg/aarch64/Makefile.target    5
-rw-r--r--  tests/tcg/arm/Makefile.target        6
-rw-r--r--  tests/tcg/arm/semihosting.c         45
19 files changed, 926 insertions, 667 deletions
diff --git a/MAINTAINERS b/MAINTAINERS index bd7ee23..21264ea 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -1206,7 +1206,7 @@ T: git https://github.com/borntraeger/qemu.git s390-next L: qemu-s390x@nongnu.org S390 PCI -M: Collin Walling <walling@linux.ibm.com> +M: Matthew Rosato <mjrosato@linux.ibm.com> S: Supported F: hw/s390x/s390-pci* L: qemu-s390x@nongnu.org diff --git a/accel/kvm/kvm-all.c b/accel/kvm/kvm-all.c index b09bad0..aabe097 100644 --- a/accel/kvm/kvm-all.c +++ b/accel/kvm/kvm-all.c @@ -140,6 +140,7 @@ bool kvm_direct_msi_allowed; bool kvm_ioeventfd_any_length_allowed; bool kvm_msi_use_devid; static bool kvm_immediate_exit; +static hwaddr kvm_max_slot_size = ~0; static const KVMCapabilityInfo kvm_required_capabilites[] = { KVM_CAP_INFO(USER_MEMORY), @@ -437,7 +438,7 @@ static int kvm_slot_update_flags(KVMMemoryListener *kml, KVMSlot *mem, static int kvm_section_update_flags(KVMMemoryListener *kml, MemoryRegionSection *section) { - hwaddr start_addr, size; + hwaddr start_addr, size, slot_size; KVMSlot *mem; int ret = 0; @@ -448,13 +449,18 @@ static int kvm_section_update_flags(KVMMemoryListener *kml, kvm_slots_lock(kml); - mem = kvm_lookup_matching_slot(kml, start_addr, size); - if (!mem) { - /* We don't have a slot if we want to trap every access. */ - goto out; - } + while (size && !ret) { + slot_size = MIN(kvm_max_slot_size, size); + mem = kvm_lookup_matching_slot(kml, start_addr, slot_size); + if (!mem) { + /* We don't have a slot if we want to trap every access. */ + goto out; + } - ret = kvm_slot_update_flags(kml, mem, section->mr); + ret = kvm_slot_update_flags(kml, mem, section->mr); + start_addr += slot_size; + size -= slot_size; + } out: kvm_slots_unlock(kml); @@ -527,11 +533,15 @@ static int kvm_physical_sync_dirty_bitmap(KVMMemoryListener *kml, struct kvm_dirty_log d = {}; KVMSlot *mem; hwaddr start_addr, size; + hwaddr slot_size, slot_offset = 0; int ret = 0; size = kvm_align_section(section, &start_addr); - if (size) { - mem = kvm_lookup_matching_slot(kml, start_addr, size); + while (size) { + MemoryRegionSection subsection = *section; + + slot_size = MIN(kvm_max_slot_size, size); + mem = kvm_lookup_matching_slot(kml, start_addr, slot_size); if (!mem) { /* We don't have a slot if we want to trap every access. */ goto out; @@ -549,11 +559,11 @@ static int kvm_physical_sync_dirty_bitmap(KVMMemoryListener *kml, * So for now, let's align to 64 instead of HOST_LONG_BITS here, in * a hope that sizeof(long) won't become >8 any time soon. 
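The kvm-all.c hunks above all follow the same pattern: a memory section larger than kvm_max_slot_size is walked in slot-sized chunks so each chunk maps onto one KVM memslot. A minimal standalone sketch of that pattern, assuming stand-ins for QEMU's hwaddr and MIN (walk_in_slot_chunks and per_slot are hypothetical names, not part of the patch):

#include <stdint.h>

typedef uint64_t hwaddr;                        /* stand-in for QEMU's hwaddr */
#define MIN(a, b) ((a) < (b) ? (a) : (b))

static hwaddr kvm_max_slot_size = ~(hwaddr)0;   /* "no limit" by default */

/* Walk [start_addr, start_addr + size) in chunks no larger than
 * kvm_max_slot_size, invoking per_slot() once per would-be memslot. */
static void walk_in_slot_chunks(hwaddr start_addr, hwaddr size,
                                void (*per_slot)(hwaddr start, hwaddr len))
{
    while (size) {
        hwaddr slot_size = MIN(kvm_max_slot_size, size);
        per_slot(start_addr, slot_size);
        start_addr += slot_size;
        size -= slot_size;
    }
}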
*/ - size = ALIGN(((mem->memory_size) >> TARGET_PAGE_BITS), - /*HOST_LONG_BITS*/ 64) / 8; if (!mem->dirty_bmap) { + hwaddr bitmap_size = ALIGN(((mem->memory_size) >> TARGET_PAGE_BITS), + /*HOST_LONG_BITS*/ 64) / 8; /* Allocate on the first log_sync, once and for all */ - mem->dirty_bmap = g_malloc0(size); + mem->dirty_bmap = g_malloc0(bitmap_size); } d.dirty_bitmap = mem->dirty_bmap; @@ -564,7 +574,13 @@ static int kvm_physical_sync_dirty_bitmap(KVMMemoryListener *kml, goto out; } - kvm_get_dirty_pages_log_range(section, d.dirty_bitmap); + subsection.offset_within_region += slot_offset; + subsection.size = int128_make64(slot_size); + kvm_get_dirty_pages_log_range(&subsection, d.dirty_bitmap); + + slot_offset += slot_size; + start_addr += slot_size; + size -= slot_size; } out: return ret; @@ -575,63 +591,22 @@ out: #define KVM_CLEAR_LOG_ALIGN (qemu_real_host_page_size << KVM_CLEAR_LOG_SHIFT) #define KVM_CLEAR_LOG_MASK (-KVM_CLEAR_LOG_ALIGN) -/** - * kvm_physical_log_clear - Clear the kernel's dirty bitmap for range - * - * NOTE: this will be a no-op if we haven't enabled manual dirty log - * protection in the host kernel because in that case this operation - * will be done within log_sync(). - * - * @kml: the kvm memory listener - * @section: the memory range to clear dirty bitmap - */ -static int kvm_physical_log_clear(KVMMemoryListener *kml, - MemoryRegionSection *section) +static int kvm_log_clear_one_slot(KVMSlot *mem, int as_id, uint64_t start, + uint64_t size) { KVMState *s = kvm_state; + uint64_t end, bmap_start, start_delta, bmap_npages; struct kvm_clear_dirty_log d; - uint64_t start, end, bmap_start, start_delta, bmap_npages, size; unsigned long *bmap_clear = NULL, psize = qemu_real_host_page_size; - KVMSlot *mem = NULL; - int ret, i; - - if (!s->manual_dirty_log_protect) { - /* No need to do explicit clear */ - return 0; - } - - start = section->offset_within_address_space; - size = int128_get64(section->size); - - if (!size) { - /* Nothing more we can do... */ - return 0; - } - - kvm_slots_lock(kml); - - /* Find any possible slot that covers the section */ - for (i = 0; i < s->nr_slots; i++) { - mem = &kml->slots[i]; - if (mem->start_addr <= start && - start + size <= mem->start_addr + mem->memory_size) { - break; - } - } - - /* - * We should always find one memslot until this point, otherwise - * there could be something wrong from the upper layer - */ - assert(mem && i != s->nr_slots); + int ret; /* * We need to extend either the start or the size or both to * satisfy the KVM interface requirement. Firstly, do the start * page alignment on 64 host pages */ - bmap_start = (start - mem->start_addr) & KVM_CLEAR_LOG_MASK; - start_delta = start - mem->start_addr - bmap_start; + bmap_start = start & KVM_CLEAR_LOG_MASK; + start_delta = start - bmap_start; bmap_start /= psize; /* @@ -694,7 +669,7 @@ static int kvm_physical_log_clear(KVMMemoryListener *kml, /* It should never overflow. 
If it happens, say something */ assert(bmap_npages <= UINT32_MAX); d.num_pages = bmap_npages; - d.slot = mem->slot | (kml->as_id << 16); + d.slot = mem->slot | (as_id << 16); if (kvm_vm_ioctl(s, KVM_CLEAR_DIRTY_LOG, &d) == -1) { ret = -errno; @@ -717,6 +692,66 @@ static int kvm_physical_log_clear(KVMMemoryListener *kml, size / psize); /* This handles the NULL case well */ g_free(bmap_clear); + return ret; +} + + +/** + * kvm_physical_log_clear - Clear the kernel's dirty bitmap for range + * + * NOTE: this will be a no-op if we haven't enabled manual dirty log + * protection in the host kernel because in that case this operation + * will be done within log_sync(). + * + * @kml: the kvm memory listener + * @section: the memory range to clear dirty bitmap + */ +static int kvm_physical_log_clear(KVMMemoryListener *kml, + MemoryRegionSection *section) +{ + KVMState *s = kvm_state; + uint64_t start, size, offset, count; + KVMSlot *mem; + int ret, i; + + if (!s->manual_dirty_log_protect) { + /* No need to do explicit clear */ + return 0; + } + + start = section->offset_within_address_space; + size = int128_get64(section->size); + + if (!size) { + /* Nothing more we can do... */ + return 0; + } + + kvm_slots_lock(kml); + + for (i = 0; i < s->nr_slots; i++) { + mem = &kml->slots[i]; + /* Discard slots that are empty or do not overlap the section */ + if (!mem->memory_size || + mem->start_addr > start + size - 1 || + start > mem->start_addr + mem->memory_size - 1) { + continue; + } + + if (start >= mem->start_addr) { + /* The slot starts before section or is aligned to it. */ + offset = start - mem->start_addr; + count = MIN(mem->memory_size - offset, size); + } else { + /* The slot starts after section. */ + offset = 0; + count = MIN(mem->memory_size, size - (mem->start_addr - start)); + } + ret = kvm_log_clear_one_slot(mem, kml->as_id, offset, count); + if (ret < 0) { + break; + } + } kvm_slots_unlock(kml); @@ -953,6 +988,14 @@ kvm_check_extension_list(KVMState *s, const KVMCapabilityInfo *list) return NULL; } +void kvm_set_max_memslot_size(hwaddr max_slot_size) +{ + g_assert( + ROUND_UP(max_slot_size, qemu_real_host_page_size) == max_slot_size + ); + kvm_max_slot_size = max_slot_size; +} + static void kvm_set_phys_mem(KVMMemoryListener *kml, MemoryRegionSection *section, bool add) { @@ -960,7 +1003,7 @@ static void kvm_set_phys_mem(KVMMemoryListener *kml, int err; MemoryRegion *mr = section->mr; bool writeable = !mr->readonly && !mr->rom_device; - hwaddr start_addr, size; + hwaddr start_addr, size, slot_size; void *ram; if (!memory_region_is_ram(mr)) { @@ -985,41 +1028,52 @@ static void kvm_set_phys_mem(KVMMemoryListener *kml, kvm_slots_lock(kml); if (!add) { - mem = kvm_lookup_matching_slot(kml, start_addr, size); - if (!mem) { - goto out; - } - if (mem->flags & KVM_MEM_LOG_DIRTY_PAGES) { - kvm_physical_sync_dirty_bitmap(kml, section); - } + do { + slot_size = MIN(kvm_max_slot_size, size); + mem = kvm_lookup_matching_slot(kml, start_addr, slot_size); + if (!mem) { + goto out; + } + if (mem->flags & KVM_MEM_LOG_DIRTY_PAGES) { + kvm_physical_sync_dirty_bitmap(kml, section); + } - /* unregister the slot */ - g_free(mem->dirty_bmap); - mem->dirty_bmap = NULL; - mem->memory_size = 0; - mem->flags = 0; - err = kvm_set_user_memory_region(kml, mem, false); - if (err) { - fprintf(stderr, "%s: error unregistering slot: %s\n", - __func__, strerror(-err)); - abort(); - } + /* unregister the slot */ + g_free(mem->dirty_bmap); + mem->dirty_bmap = NULL; + mem->memory_size = 0; + mem->flags = 0; + err = 
kvm_set_user_memory_region(kml, mem, false); + if (err) { + fprintf(stderr, "%s: error unregistering slot: %s\n", + __func__, strerror(-err)); + abort(); + } + start_addr += slot_size; + size -= slot_size; + } while (size); goto out; } /* register the new slot */ - mem = kvm_alloc_slot(kml); - mem->memory_size = size; - mem->start_addr = start_addr; - mem->ram = ram; - mem->flags = kvm_mem_flags(mr); - - err = kvm_set_user_memory_region(kml, mem, true); - if (err) { - fprintf(stderr, "%s: error registering slot: %s\n", __func__, - strerror(-err)); - abort(); - } + do { + slot_size = MIN(kvm_max_slot_size, size); + mem = kvm_alloc_slot(kml); + mem->memory_size = slot_size; + mem->start_addr = start_addr; + mem->ram = ram; + mem->flags = kvm_mem_flags(mr); + + err = kvm_set_user_memory_region(kml, mem, true); + if (err) { + fprintf(stderr, "%s: error registering slot: %s\n", __func__, + strerror(-err)); + abort(); + } + start_addr += slot_size; + ram += slot_size; + size -= slot_size; + } while (size); out: kvm_slots_unlock(kml); @@ -2859,6 +2913,7 @@ static bool kvm_accel_has_memory(MachineState *ms, AddressSpace *as, for (i = 0; i < kvm->nr_as; ++i) { if (kvm->as[i].as == as && kvm->as[i].ml) { + size = MIN(kvm_max_slot_size, size); return NULL != kvm_lookup_matching_slot(kvm->as[i].ml, start_addr, size); } @@ -728,7 +728,7 @@ ARCH= # Normalise host CPU name and set ARCH. # Note that this case should only have supported host CPUs, not guests. case "$cpu" in - ppc|ppc64|s390|s390x|sparc64|x32|riscv32|riscv64) + ppc|ppc64|s390x|sparc64|x32|riscv32|riscv64) supported_cpu="yes" ;; ppc64le) diff --git a/crypto/block-luks.c b/crypto/block-luks.c index 743949a..4861db8 100644 --- a/crypto/block-luks.c +++ b/crypto/block-luks.c @@ -143,7 +143,7 @@ struct QCryptoBlockLUKSKeySlot { /* salt for PBKDF2 */ uint8_t salt[QCRYPTO_BLOCK_LUKS_SALT_LEN]; /* start sector of key material */ - uint32_t key_offset; + uint32_t key_offset_sector; /* number of anti-forensic stripes */ uint32_t stripes; }; @@ -172,10 +172,10 @@ struct QCryptoBlockLUKSHeader { char hash_spec[QCRYPTO_BLOCK_LUKS_HASH_SPEC_LEN]; /* start offset of the volume data (in 512 byte sectors) */ - uint32_t payload_offset; + uint32_t payload_offset_sector; /* Number of key bytes */ - uint32_t key_bytes; + uint32_t master_key_len; /* master key checksum after PBKDF2 */ uint8_t master_key_digest[QCRYPTO_BLOCK_LUKS_DIGEST_LEN]; @@ -199,13 +199,25 @@ QEMU_BUILD_BUG_ON(sizeof(struct QCryptoBlockLUKSHeader) != 592); struct QCryptoBlockLUKS { QCryptoBlockLUKSHeader header; - /* Cache parsed versions of what's in header fields, - * as we can't rely on QCryptoBlock.cipher being - * non-NULL */ + /* Main encryption algorithm used for encryption*/ QCryptoCipherAlgorithm cipher_alg; + + /* Mode of encryption for the selected encryption algorithm */ QCryptoCipherMode cipher_mode; + + /* Initialization vector generation algorithm */ QCryptoIVGenAlgorithm ivgen_alg; + + /* Hash algorithm used for IV generation*/ QCryptoHashAlgorithm ivgen_hash_alg; + + /* + * Encryption algorithm used for IV generation. 
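For the reworked kvm_physical_log_clear() earlier in this patch, the per-slot work is an interval intersection: clip the requested clear range against each memslot and hand the slot-relative offset and length to kvm_log_clear_one_slot(). A sketch of just that clipping step (clip_to_slot is a hypothetical name; MIN as above):

#include <stdbool.h>
#include <stdint.h>

#define MIN(a, b) ((a) < (b) ? (a) : (b))

/* Clip a clear request [start, start + size) against one memslot
 * [slot_start, slot_start + slot_size).  Returns false if the slot is
 * empty or disjoint; otherwise offset/count come back relative to the
 * slot start, which is what the KVM_CLEAR_DIRTY_LOG path needs. */
static bool clip_to_slot(uint64_t start, uint64_t size,
                         uint64_t slot_start, uint64_t slot_size,
                         uint64_t *offset, uint64_t *count)
{
    if (!slot_size || slot_start > start + size - 1 ||
        start > slot_start + slot_size - 1) {
        return false;
    }
    if (start >= slot_start) {
        *offset = start - slot_start;             /* request begins in slot */
        *count = MIN(slot_size - *offset, size);
    } else {
        *offset = 0;                              /* slot begins in request */
        *count = MIN(slot_size, size - (slot_start - start));
    }
    return true;
}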
+ * Usually the same as main encryption algorithm + */ + QCryptoCipherAlgorithm ivgen_cipher_alg; + + /* Hash algorithm used in pbkdf2 function */ QCryptoHashAlgorithm hash_alg; }; @@ -398,6 +410,466 @@ qcrypto_block_luks_essiv_cipher(QCryptoCipherAlgorithm cipher, } /* + * Returns number of sectors needed to store the key material + * given number of anti forensic stripes + */ +static int +qcrypto_block_luks_splitkeylen_sectors(const QCryptoBlockLUKS *luks, + unsigned int header_sectors, + unsigned int stripes) +{ + /* + * This calculation doesn't match that shown in the spec, + * but instead follows the cryptsetup implementation. + */ + + size_t splitkeylen = luks->header.master_key_len * stripes; + + /* First align the key material size to block size*/ + size_t splitkeylen_sectors = + DIV_ROUND_UP(splitkeylen, QCRYPTO_BLOCK_LUKS_SECTOR_SIZE); + + /* Then also align the key material size to the size of the header */ + return ROUND_UP(splitkeylen_sectors, header_sectors); +} + +/* + * Stores the main LUKS header, taking care of endianess + */ +static int +qcrypto_block_luks_store_header(QCryptoBlock *block, + QCryptoBlockWriteFunc writefunc, + void *opaque, + Error **errp) +{ + const QCryptoBlockLUKS *luks = block->opaque; + Error *local_err = NULL; + size_t i; + g_autofree QCryptoBlockLUKSHeader *hdr_copy = NULL; + + /* Create a copy of the header */ + hdr_copy = g_new0(QCryptoBlockLUKSHeader, 1); + memcpy(hdr_copy, &luks->header, sizeof(QCryptoBlockLUKSHeader)); + + /* + * Everything on disk uses Big Endian (tm), so flip header fields + * before writing them + */ + cpu_to_be16s(&hdr_copy->version); + cpu_to_be32s(&hdr_copy->payload_offset_sector); + cpu_to_be32s(&hdr_copy->master_key_len); + cpu_to_be32s(&hdr_copy->master_key_iterations); + + for (i = 0; i < QCRYPTO_BLOCK_LUKS_NUM_KEY_SLOTS; i++) { + cpu_to_be32s(&hdr_copy->key_slots[i].active); + cpu_to_be32s(&hdr_copy->key_slots[i].iterations); + cpu_to_be32s(&hdr_copy->key_slots[i].key_offset_sector); + cpu_to_be32s(&hdr_copy->key_slots[i].stripes); + } + + /* Write out the partition header and key slot headers */ + writefunc(block, 0, (const uint8_t *)hdr_copy, sizeof(*hdr_copy), + opaque, &local_err); + + if (local_err) { + error_propagate(errp, local_err); + return -1; + } + return 0; +} + +/* + * Loads the main LUKS header,and byteswaps it to native endianess + * And run basic sanity checks on it + */ +static int +qcrypto_block_luks_load_header(QCryptoBlock *block, + QCryptoBlockReadFunc readfunc, + void *opaque, + Error **errp) +{ + ssize_t rv; + size_t i; + QCryptoBlockLUKS *luks = block->opaque; + + /* + * Read the entire LUKS header, minus the key material from + * the underlying device + */ + rv = readfunc(block, 0, + (uint8_t *)&luks->header, + sizeof(luks->header), + opaque, + errp); + if (rv < 0) { + return rv; + } + + /* + * The header is always stored in big-endian format, so + * convert everything to native + */ + be16_to_cpus(&luks->header.version); + be32_to_cpus(&luks->header.payload_offset_sector); + be32_to_cpus(&luks->header.master_key_len); + be32_to_cpus(&luks->header.master_key_iterations); + + for (i = 0; i < QCRYPTO_BLOCK_LUKS_NUM_KEY_SLOTS; i++) { + be32_to_cpus(&luks->header.key_slots[i].active); + be32_to_cpus(&luks->header.key_slots[i].iterations); + be32_to_cpus(&luks->header.key_slots[i].key_offset_sector); + be32_to_cpus(&luks->header.key_slots[i].stripes); + } + + return 0; +} + +/* + * Does basic sanity checks on the LUKS header + */ +static int +qcrypto_block_luks_check_header(const 
QCryptoBlockLUKS *luks, Error **errp) +{ + size_t i, j; + + unsigned int header_sectors = QCRYPTO_BLOCK_LUKS_KEY_SLOT_OFFSET / + QCRYPTO_BLOCK_LUKS_SECTOR_SIZE; + + if (memcmp(luks->header.magic, qcrypto_block_luks_magic, + QCRYPTO_BLOCK_LUKS_MAGIC_LEN) != 0) { + error_setg(errp, "Volume is not in LUKS format"); + return -1; + } + + if (luks->header.version != QCRYPTO_BLOCK_LUKS_VERSION) { + error_setg(errp, "LUKS version %" PRIu32 " is not supported", + luks->header.version); + return -1; + } + + /* Check all keyslots for corruption */ + for (i = 0 ; i < QCRYPTO_BLOCK_LUKS_NUM_KEY_SLOTS ; i++) { + + const QCryptoBlockLUKSKeySlot *slot1 = &luks->header.key_slots[i]; + unsigned int start1 = slot1->key_offset_sector; + unsigned int len1 = + qcrypto_block_luks_splitkeylen_sectors(luks, + header_sectors, + slot1->stripes); + + if (slot1->stripes == 0) { + error_setg(errp, "Keyslot %zu is corrupted (stripes == 0)", i); + return -1; + } + + if (slot1->active != QCRYPTO_BLOCK_LUKS_KEY_SLOT_DISABLED && + slot1->active != QCRYPTO_BLOCK_LUKS_KEY_SLOT_ENABLED) { + error_setg(errp, + "Keyslot %zu state (active/disable) is corrupted", i); + return -1; + } + + if (start1 + len1 > luks->header.payload_offset_sector) { + error_setg(errp, + "Keyslot %zu is overlapping with the encrypted payload", + i); + return -1; + } + + for (j = i + 1 ; j < QCRYPTO_BLOCK_LUKS_NUM_KEY_SLOTS ; j++) { + const QCryptoBlockLUKSKeySlot *slot2 = &luks->header.key_slots[j]; + unsigned int start2 = slot2->key_offset_sector; + unsigned int len2 = + qcrypto_block_luks_splitkeylen_sectors(luks, + header_sectors, + slot2->stripes); + + if (start1 + len1 > start2 && start2 + len2 > start1) { + error_setg(errp, + "Keyslots %zu and %zu are overlapping in the header", + i, j); + return -1; + } + } + + } + return 0; +} + +/* + * Parses the crypto parameters that are stored in the LUKS header + */ + +static int +qcrypto_block_luks_parse_header(QCryptoBlockLUKS *luks, Error **errp) +{ + g_autofree char *cipher_mode = g_strdup(luks->header.cipher_mode); + char *ivgen_name, *ivhash_name; + Error *local_err = NULL; + + /* + * The cipher_mode header contains a string that we have + * to further parse, of the format + * + * <cipher-mode>-<iv-generator>[:<iv-hash>] + * + * eg cbc-essiv:sha256, cbc-plain64 + */ + ivgen_name = strchr(cipher_mode, '-'); + if (!ivgen_name) { + error_setg(errp, "Unexpected cipher mode string format %s", + luks->header.cipher_mode); + return -1; + } + *ivgen_name = '\0'; + ivgen_name++; + + ivhash_name = strchr(ivgen_name, ':'); + if (!ivhash_name) { + luks->ivgen_hash_alg = 0; + } else { + *ivhash_name = '\0'; + ivhash_name++; + + luks->ivgen_hash_alg = qcrypto_block_luks_hash_name_lookup(ivhash_name, + &local_err); + if (local_err) { + error_propagate(errp, local_err); + return -1; + } + } + + luks->cipher_mode = qcrypto_block_luks_cipher_mode_lookup(cipher_mode, + &local_err); + if (local_err) { + error_propagate(errp, local_err); + return -1; + } + + luks->cipher_alg = + qcrypto_block_luks_cipher_name_lookup(luks->header.cipher_name, + luks->cipher_mode, + luks->header.master_key_len, + &local_err); + if (local_err) { + error_propagate(errp, local_err); + return -1; + } + + luks->hash_alg = + qcrypto_block_luks_hash_name_lookup(luks->header.hash_spec, + &local_err); + if (local_err) { + error_propagate(errp, local_err); + return -1; + } + + luks->ivgen_alg = qcrypto_block_luks_ivgen_name_lookup(ivgen_name, + &local_err); + if (local_err) { + error_propagate(errp, local_err); + return -1; + } + + if 
(luks->ivgen_alg == QCRYPTO_IVGEN_ALG_ESSIV) { + if (!ivhash_name) { + error_setg(errp, "Missing IV generator hash specification"); + return -1; + } + luks->ivgen_cipher_alg = + qcrypto_block_luks_essiv_cipher(luks->cipher_alg, + luks->ivgen_hash_alg, + &local_err); + if (local_err) { + error_propagate(errp, local_err); + return -1; + } + } else { + + /* + * Note we parsed the ivhash_name earlier in the cipher_mode + * spec string even with plain/plain64 ivgens, but we + * will ignore it, since it is irrelevant for these ivgens. + * This is for compat with dm-crypt which will silently + * ignore hash names with these ivgens rather than report + * an error about the invalid usage + */ + luks->ivgen_cipher_alg = luks->cipher_alg; + } + return 0; +} + +/* + * Given a key slot, user password, and the master key, + * will store the encrypted master key there, and update the + * in-memory header. User must then write the in-memory header + * + * Returns: + * 0 if the keyslot was written successfully + * with the provided password + * -1 if a fatal error occurred while storing the key + */ +static int +qcrypto_block_luks_store_key(QCryptoBlock *block, + unsigned int slot_idx, + const char *password, + uint8_t *masterkey, + uint64_t iter_time, + QCryptoBlockWriteFunc writefunc, + void *opaque, + Error **errp) +{ + QCryptoBlockLUKS *luks = block->opaque; + QCryptoBlockLUKSKeySlot *slot = &luks->header.key_slots[slot_idx]; + g_autofree uint8_t *splitkey = NULL; + size_t splitkeylen; + g_autofree uint8_t *slotkey = NULL; + g_autoptr(QCryptoCipher) cipher = NULL; + g_autoptr(QCryptoIVGen) ivgen = NULL; + Error *local_err = NULL; + uint64_t iters; + int ret = -1; + + if (qcrypto_random_bytes(slot->salt, + QCRYPTO_BLOCK_LUKS_SALT_LEN, + errp) < 0) { + goto cleanup; + } + + splitkeylen = luks->header.master_key_len * slot->stripes; + + /* + * Determine how many iterations are required to + * hash the user password while consuming 1 second of compute + * time + */ + iters = qcrypto_pbkdf2_count_iters(luks->hash_alg, + (uint8_t *)password, strlen(password), + slot->salt, + QCRYPTO_BLOCK_LUKS_SALT_LEN, + luks->header.master_key_len, + &local_err); + if (local_err) { + error_propagate(errp, local_err); + goto cleanup; + } + + if (iters > (ULLONG_MAX / iter_time)) { + error_setg_errno(errp, ERANGE, + "PBKDF iterations %llu too large to scale", + (unsigned long long)iters); + goto cleanup; + } + + /* iter_time was in millis, but count_iters reported for secs */ + iters = iters * iter_time / 1000; + + if (iters > UINT32_MAX) { + error_setg_errno(errp, ERANGE, + "PBKDF iterations %llu larger than %u", + (unsigned long long)iters, UINT32_MAX); + goto cleanup; + } + + slot->iterations = + MAX(iters, QCRYPTO_BLOCK_LUKS_MIN_SLOT_KEY_ITERS); + + + /* + * Generate a key that we'll use to encrypt the master + * key, from the user's password + */ + slotkey = g_new0(uint8_t, luks->header.master_key_len); + if (qcrypto_pbkdf2(luks->hash_alg, + (uint8_t *)password, strlen(password), + slot->salt, + QCRYPTO_BLOCK_LUKS_SALT_LEN, + slot->iterations, + slotkey, luks->header.master_key_len, + errp) < 0) { + goto cleanup; + } + + + /* + * Setup the encryption objects needed to encrypt the + * master key material + */ + cipher = qcrypto_cipher_new(luks->cipher_alg, + luks->cipher_mode, + slotkey, luks->header.master_key_len, + errp); + if (!cipher) { + goto cleanup; + } + + ivgen = qcrypto_ivgen_new(luks->ivgen_alg, + luks->ivgen_cipher_alg, + luks->ivgen_hash_alg, + slotkey, luks->header.master_key_len, + errp); + if (!ivgen) { 
+ goto cleanup; + } + + /* + * Before storing the master key, we need to vastly + * increase its size, as protection against forensic + * disk data recovery + */ + splitkey = g_new0(uint8_t, splitkeylen); + + if (qcrypto_afsplit_encode(luks->hash_alg, + luks->header.master_key_len, + slot->stripes, + masterkey, + splitkey, + errp) < 0) { + goto cleanup; + } + + /* + * Now we encrypt the split master key with the key generated + * from the user's password, before storing it + */ + if (qcrypto_block_cipher_encrypt_helper(cipher, block->niv, ivgen, + QCRYPTO_BLOCK_LUKS_SECTOR_SIZE, + 0, + splitkey, + splitkeylen, + errp) < 0) { + goto cleanup; + } + + /* Write out the slot's master key material. */ + if (writefunc(block, + slot->key_offset_sector * + QCRYPTO_BLOCK_LUKS_SECTOR_SIZE, + splitkey, splitkeylen, + opaque, + errp) != splitkeylen) { + goto cleanup; + } + + slot->active = QCRYPTO_BLOCK_LUKS_KEY_SLOT_ENABLED; + + if (qcrypto_block_luks_store_header(block, writefunc, opaque, errp) < 0) { + goto cleanup; + } + + ret = 0; + +cleanup: + if (slotkey) { + memset(slotkey, 0, luks->header.master_key_len); + } + if (splitkey) { + memset(splitkey, 0, splitkeylen); + } + return ret; +} + +/* * Given a key slot, and user password, this will attempt to unlock * the master encryption key from the key slot. * @@ -410,21 +882,15 @@ qcrypto_block_luks_essiv_cipher(QCryptoCipherAlgorithm cipher, */ static int qcrypto_block_luks_load_key(QCryptoBlock *block, - QCryptoBlockLUKSKeySlot *slot, + size_t slot_idx, const char *password, - QCryptoCipherAlgorithm cipheralg, - QCryptoCipherMode ciphermode, - QCryptoHashAlgorithm hash, - QCryptoIVGenAlgorithm ivalg, - QCryptoCipherAlgorithm ivcipheralg, - QCryptoHashAlgorithm ivhash, uint8_t *masterkey, - size_t masterkeylen, QCryptoBlockReadFunc readfunc, void *opaque, Error **errp) { QCryptoBlockLUKS *luks = block->opaque; + const QCryptoBlockLUKSKeySlot *slot = &luks->header.key_slots[slot_idx]; g_autofree uint8_t *splitkey = NULL; size_t splitkeylen; g_autofree uint8_t *possiblekey = NULL; @@ -438,9 +904,9 @@ qcrypto_block_luks_load_key(QCryptoBlock *block, return 0; } - splitkeylen = masterkeylen * slot->stripes; + splitkeylen = luks->header.master_key_len * slot->stripes; splitkey = g_new0(uint8_t, splitkeylen); - possiblekey = g_new0(uint8_t, masterkeylen); + possiblekey = g_new0(uint8_t, luks->header.master_key_len); /* * The user password is used to generate a (possible) @@ -449,11 +915,11 @@ qcrypto_block_luks_load_key(QCryptoBlock *block, * the key is correct and validate the results of * decryption later. */ - if (qcrypto_pbkdf2(hash, + if (qcrypto_pbkdf2(luks->hash_alg, (const uint8_t *)password, strlen(password), slot->salt, QCRYPTO_BLOCK_LUKS_SALT_LEN, slot->iterations, - possiblekey, masterkeylen, + possiblekey, luks->header.master_key_len, errp) < 0) { return -1; } @@ -466,7 +932,7 @@ qcrypto_block_luks_load_key(QCryptoBlock *block, * then encrypted. 
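The iteration scaling in qcrypto_block_luks_store_key() above is easy to misread, so here is the arithmetic spelled out as a sketch (scale_slot_iters is a hypothetical name and the 1000000 iters/sec benchmark figure is purely illustrative):

#include <stdint.h>

#define MAX(a, b) ((a) > (b) ? (a) : (b))

/* qcrypto_pbkdf2_count_iters() reports iterations per *second*, while
 * iter_time is in *milliseconds*, hence the division by 1000.  E.g. a
 * made-up benchmark of 1000000 iters/sec with iter_time = 2000 ms yields
 * 2000000 slot iterations, clamped below by min_iters. */
static int scale_slot_iters(uint64_t iters_per_sec, uint64_t iter_time_ms,
                            uint64_t min_iters, uint32_t *out)
{
    if (!iter_time_ms || iters_per_sec > UINT64_MAX / iter_time_ms) {
        return -1;                 /* zero time, or overflow when scaling */
    }
    uint64_t scaled = iters_per_sec * iter_time_ms / 1000;
    if (scaled > UINT32_MAX) {
        return -1;                 /* the on-disk field is only 32 bits */
    }
    *out = (uint32_t)MAX(scaled, min_iters);
    return 0;
}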
*/ rv = readfunc(block, - slot->key_offset * QCRYPTO_BLOCK_LUKS_SECTOR_SIZE, + slot->key_offset_sector * QCRYPTO_BLOCK_LUKS_SECTOR_SIZE, splitkey, splitkeylen, opaque, errp); @@ -477,19 +943,23 @@ qcrypto_block_luks_load_key(QCryptoBlock *block, /* Setup the cipher/ivgen that we'll use to try to decrypt * the split master key material */ - cipher = qcrypto_cipher_new(cipheralg, ciphermode, - possiblekey, masterkeylen, + cipher = qcrypto_cipher_new(luks->cipher_alg, + luks->cipher_mode, + possiblekey, + luks->header.master_key_len, errp); if (!cipher) { return -1; } - niv = qcrypto_cipher_get_iv_len(cipheralg, - ciphermode); - ivgen = qcrypto_ivgen_new(ivalg, - ivcipheralg, - ivhash, - possiblekey, masterkeylen, + niv = qcrypto_cipher_get_iv_len(luks->cipher_alg, + luks->cipher_mode); + + ivgen = qcrypto_ivgen_new(luks->ivgen_alg, + luks->ivgen_cipher_alg, + luks->ivgen_hash_alg, + possiblekey, + luks->header.master_key_len, errp); if (!ivgen) { return -1; @@ -518,8 +988,8 @@ qcrypto_block_luks_load_key(QCryptoBlock *block, * Now we've decrypted the split master key, join * it back together to get the actual master key. */ - if (qcrypto_afsplit_decode(hash, - masterkeylen, + if (qcrypto_afsplit_decode(luks->hash_alg, + luks->header.master_key_len, slot->stripes, splitkey, masterkey, @@ -536,12 +1006,14 @@ qcrypto_block_luks_load_key(QCryptoBlock *block, * then comparing that to the hash stored in the key slot * header */ - if (qcrypto_pbkdf2(hash, - masterkey, masterkeylen, + if (qcrypto_pbkdf2(luks->hash_alg, + masterkey, + luks->header.master_key_len, luks->header.master_key_salt, QCRYPTO_BLOCK_LUKS_SALT_LEN, luks->header.master_key_iterations, - keydigest, G_N_ELEMENTS(keydigest), + keydigest, + G_N_ELEMENTS(keydigest), errp) < 0) { return -1; } @@ -568,37 +1040,19 @@ qcrypto_block_luks_load_key(QCryptoBlock *block, static int qcrypto_block_luks_find_key(QCryptoBlock *block, const char *password, - QCryptoCipherAlgorithm cipheralg, - QCryptoCipherMode ciphermode, - QCryptoHashAlgorithm hash, - QCryptoIVGenAlgorithm ivalg, - QCryptoCipherAlgorithm ivcipheralg, - QCryptoHashAlgorithm ivhash, - uint8_t **masterkey, - size_t *masterkeylen, + uint8_t *masterkey, QCryptoBlockReadFunc readfunc, void *opaque, Error **errp) { - QCryptoBlockLUKS *luks = block->opaque; size_t i; int rv; - *masterkey = g_new0(uint8_t, luks->header.key_bytes); - *masterkeylen = luks->header.key_bytes; - for (i = 0; i < QCRYPTO_BLOCK_LUKS_NUM_KEY_SLOTS; i++) { rv = qcrypto_block_luks_load_key(block, - &luks->header.key_slots[i], + i, password, - cipheralg, - ciphermode, - hash, - ivalg, - ivcipheralg, - ivhash, - *masterkey, - *masterkeylen, + masterkey, readfunc, opaque, errp); @@ -611,11 +1065,7 @@ qcrypto_block_luks_find_key(QCryptoBlock *block, } error_setg(errp, "Invalid password, cannot unlock any keyslot"); - error: - g_free(*masterkey); - *masterkey = NULL; - *masterkeylen = 0; return -1; } @@ -630,20 +1080,8 @@ qcrypto_block_luks_open(QCryptoBlock *block, size_t n_threads, Error **errp) { - QCryptoBlockLUKS *luks; - Error *local_err = NULL; - int ret = 0; - size_t i; - ssize_t rv; + QCryptoBlockLUKS *luks = NULL; g_autofree uint8_t *masterkey = NULL; - size_t masterkeylen; - char *ivgen_name, *ivhash_name; - QCryptoCipherMode ciphermode; - QCryptoCipherAlgorithm cipheralg; - QCryptoIVGenAlgorithm ivalg; - QCryptoCipherAlgorithm ivcipheralg; - QCryptoHashAlgorithm hash; - QCryptoHashAlgorithm ivhash; g_autofree char *password = NULL; if (!(flags & QCRYPTO_BLOCK_OPEN_NO_IO)) { @@ -662,198 +1100,72 @@ 
qcrypto_block_luks_open(QCryptoBlock *block, luks = g_new0(QCryptoBlockLUKS, 1); block->opaque = luks; - /* Read the entire LUKS header, minus the key material from - * the underlying device */ - rv = readfunc(block, 0, - (uint8_t *)&luks->header, - sizeof(luks->header), - opaque, - errp); - if (rv < 0) { - ret = rv; - goto fail; - } - - /* The header is always stored in big-endian format, so - * convert everything to native */ - be16_to_cpus(&luks->header.version); - be32_to_cpus(&luks->header.payload_offset); - be32_to_cpus(&luks->header.key_bytes); - be32_to_cpus(&luks->header.master_key_iterations); - - for (i = 0; i < QCRYPTO_BLOCK_LUKS_NUM_KEY_SLOTS; i++) { - be32_to_cpus(&luks->header.key_slots[i].active); - be32_to_cpus(&luks->header.key_slots[i].iterations); - be32_to_cpus(&luks->header.key_slots[i].key_offset); - be32_to_cpus(&luks->header.key_slots[i].stripes); - } - - if (memcmp(luks->header.magic, qcrypto_block_luks_magic, - QCRYPTO_BLOCK_LUKS_MAGIC_LEN) != 0) { - error_setg(errp, "Volume is not in LUKS format"); - ret = -EINVAL; - goto fail; - } - if (luks->header.version != QCRYPTO_BLOCK_LUKS_VERSION) { - error_setg(errp, "LUKS version %" PRIu32 " is not supported", - luks->header.version); - ret = -ENOTSUP; - goto fail; - } - - /* - * The cipher_mode header contains a string that we have - * to further parse, of the format - * - * <cipher-mode>-<iv-generator>[:<iv-hash>] - * - * eg cbc-essiv:sha256, cbc-plain64 - */ - ivgen_name = strchr(luks->header.cipher_mode, '-'); - if (!ivgen_name) { - ret = -EINVAL; - error_setg(errp, "Unexpected cipher mode string format %s", - luks->header.cipher_mode); - goto fail; - } - *ivgen_name = '\0'; - ivgen_name++; - - ivhash_name = strchr(ivgen_name, ':'); - if (!ivhash_name) { - ivhash = 0; - } else { - *ivhash_name = '\0'; - ivhash_name++; - - ivhash = qcrypto_block_luks_hash_name_lookup(ivhash_name, - &local_err); - if (local_err) { - ret = -ENOTSUP; - error_propagate(errp, local_err); - goto fail; - } - } - - ciphermode = qcrypto_block_luks_cipher_mode_lookup(luks->header.cipher_mode, - &local_err); - if (local_err) { - ret = -ENOTSUP; - error_propagate(errp, local_err); - goto fail; - } - - cipheralg = qcrypto_block_luks_cipher_name_lookup(luks->header.cipher_name, - ciphermode, - luks->header.key_bytes, - &local_err); - if (local_err) { - ret = -ENOTSUP; - error_propagate(errp, local_err); + if (qcrypto_block_luks_load_header(block, readfunc, opaque, errp) < 0) { goto fail; } - hash = qcrypto_block_luks_hash_name_lookup(luks->header.hash_spec, - &local_err); - if (local_err) { - ret = -ENOTSUP; - error_propagate(errp, local_err); + if (qcrypto_block_luks_check_header(luks, errp) < 0) { goto fail; } - ivalg = qcrypto_block_luks_ivgen_name_lookup(ivgen_name, - &local_err); - if (local_err) { - ret = -ENOTSUP; - error_propagate(errp, local_err); + if (qcrypto_block_luks_parse_header(luks, errp) < 0) { goto fail; } - if (ivalg == QCRYPTO_IVGEN_ALG_ESSIV) { - if (!ivhash_name) { - ret = -EINVAL; - error_setg(errp, "Missing IV generator hash specification"); - goto fail; - } - ivcipheralg = qcrypto_block_luks_essiv_cipher(cipheralg, - ivhash, - &local_err); - if (local_err) { - ret = -ENOTSUP; - error_propagate(errp, local_err); - goto fail; - } - } else { - /* Note we parsed the ivhash_name earlier in the cipher_mode - * spec string even with plain/plain64 ivgens, but we - * will ignore it, since it is irrelevant for these ivgens. 
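The cipher-mode parsing that now lives in qcrypto_block_luks_parse_header() splits strings such as "cbc-essiv:sha256" or "cbc-plain64" into mode, IV generator and optional IV hash. A tiny standalone illustration of the same splitting logic (split_cipher_mode_spec is a hypothetical name):

#include <stdio.h>
#include <string.h>

/* Split "<mode>-<ivgen>[:<ivhash>]" in place, e.g. "cbc-essiv:sha256"
 * -> mode "cbc", ivgen "essiv", ivhash "sha256"; ivhash is NULL if absent. */
static int split_cipher_mode_spec(char *spec, char **mode,
                                  char **ivgen, char **ivhash)
{
    char *dash = strchr(spec, '-');
    if (!dash) {
        return -1;                     /* malformed spec */
    }
    *dash = '\0';
    *mode = spec;
    *ivgen = dash + 1;

    char *colon = strchr(*ivgen, ':');
    if (colon) {
        *colon = '\0';
        *ivhash = colon + 1;
    } else {
        *ivhash = NULL;                /* e.g. "cbc-plain64" */
    }
    return 0;
}

int main(void)
{
    char spec[] = "cbc-essiv:sha256";
    char *mode, *ivgen, *ivhash;
    if (split_cipher_mode_spec(spec, &mode, &ivgen, &ivhash) == 0) {
        printf("mode=%s ivgen=%s ivhash=%s\n", mode, ivgen,
               ivhash ? ivhash : "(none)");
    }
    return 0;
}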
- * This is for compat with dm-crypt which will silently - * ignore hash names with these ivgens rather than report - * an error about the invalid usage - */ - ivcipheralg = cipheralg; - } - if (!(flags & QCRYPTO_BLOCK_OPEN_NO_IO)) { /* Try to find which key slot our password is valid for * and unlock the master key from that slot. */ + + masterkey = g_new0(uint8_t, luks->header.master_key_len); + if (qcrypto_block_luks_find_key(block, password, - cipheralg, ciphermode, - hash, - ivalg, - ivcipheralg, - ivhash, - &masterkey, &masterkeylen, + masterkey, readfunc, opaque, errp) < 0) { - ret = -EACCES; goto fail; } /* We have a valid master key now, so can setup the * block device payload decryption objects */ - block->kdfhash = hash; - block->niv = qcrypto_cipher_get_iv_len(cipheralg, - ciphermode); - block->ivgen = qcrypto_ivgen_new(ivalg, - ivcipheralg, - ivhash, - masterkey, masterkeylen, + block->kdfhash = luks->hash_alg; + block->niv = qcrypto_cipher_get_iv_len(luks->cipher_alg, + luks->cipher_mode); + + block->ivgen = qcrypto_ivgen_new(luks->ivgen_alg, + luks->ivgen_cipher_alg, + luks->ivgen_hash_alg, + masterkey, + luks->header.master_key_len, errp); if (!block->ivgen) { - ret = -ENOTSUP; goto fail; } - ret = qcrypto_block_init_cipher(block, cipheralg, ciphermode, - masterkey, masterkeylen, n_threads, - errp); - if (ret < 0) { - ret = -ENOTSUP; + if (qcrypto_block_init_cipher(block, + luks->cipher_alg, + luks->cipher_mode, + masterkey, + luks->header.master_key_len, + n_threads, + errp) < 0) { goto fail; } } block->sector_size = QCRYPTO_BLOCK_LUKS_SECTOR_SIZE; - block->payload_offset = luks->header.payload_offset * + block->payload_offset = luks->header.payload_offset_sector * block->sector_size; - luks->cipher_alg = cipheralg; - luks->cipher_mode = ciphermode; - luks->ivgen_alg = ivalg; - luks->ivgen_hash_alg = ivhash; - luks->hash_alg = hash; - return 0; fail: qcrypto_block_free_cipher(block); qcrypto_ivgen_free(block->ivgen); g_free(luks); - return ret; + return -1; } @@ -878,12 +1190,9 @@ qcrypto_block_luks_create(QCryptoBlock *block, QCryptoBlockCreateOptionsLUKS luks_opts; Error *local_err = NULL; g_autofree uint8_t *masterkey = NULL; - g_autofree uint8_t *slotkey = NULL; - g_autofree uint8_t *splitkey = NULL; - size_t splitkeylen = 0; + size_t header_sectors; + size_t split_key_sectors; size_t i; - g_autoptr(QCryptoCipher) cipher = NULL; - g_autoptr(QCryptoIVGen) ivgen = NULL; g_autofree char *password = NULL; const char *cipher_alg; const char *cipher_mode; @@ -891,7 +1200,6 @@ qcrypto_block_luks_create(QCryptoBlock *block, const char *ivgen_hash_alg = NULL; const char *hash_alg; g_autofree char *cipher_mode_spec = NULL; - QCryptoCipherAlgorithm ivcipheralg = 0; uint64_t iters; memcpy(&luks_opts, &options->u.luks, sizeof(luks_opts)); @@ -916,6 +1224,17 @@ qcrypto_block_luks_create(QCryptoBlock *block, luks_opts.has_ivgen_hash_alg = true; } } + + luks = g_new0(QCryptoBlockLUKS, 1); + block->opaque = luks; + + luks->cipher_alg = luks_opts.cipher_alg; + luks->cipher_mode = luks_opts.cipher_mode; + luks->ivgen_alg = luks_opts.ivgen_alg; + luks->ivgen_hash_alg = luks_opts.ivgen_hash_alg; + luks->hash_alg = luks_opts.hash_alg; + + /* Note we're allowing ivgen_hash_alg to be set even for * non-essiv iv generators that don't need a hash. It will * be silently ignored, for compatibility with dm-crypt */ @@ -923,15 +1242,13 @@ qcrypto_block_luks_create(QCryptoBlock *block, if (!options->u.luks.key_secret) { error_setg(errp, "Parameter '%skey-secret' is required for cipher", optprefix ? 
optprefix : ""); - return -1; + goto error; } password = qcrypto_secret_lookup_as_utf8(luks_opts.key_secret, errp); if (!password) { - return -1; + goto error; } - luks = g_new0(QCryptoBlockLUKS, 1); - block->opaque = luks; memcpy(luks->header.magic, qcrypto_block_luks_magic, QCRYPTO_BLOCK_LUKS_MAGIC_LEN); @@ -978,24 +1295,27 @@ qcrypto_block_luks_create(QCryptoBlock *block, } if (luks_opts.ivgen_alg == QCRYPTO_IVGEN_ALG_ESSIV) { - ivcipheralg = qcrypto_block_luks_essiv_cipher(luks_opts.cipher_alg, - luks_opts.ivgen_hash_alg, - &local_err); + luks->ivgen_cipher_alg = + qcrypto_block_luks_essiv_cipher(luks_opts.cipher_alg, + luks_opts.ivgen_hash_alg, + &local_err); if (local_err) { error_propagate(errp, local_err); goto error; } } else { - ivcipheralg = luks_opts.cipher_alg; + luks->ivgen_cipher_alg = luks_opts.cipher_alg; } strcpy(luks->header.cipher_name, cipher_alg); strcpy(luks->header.cipher_mode, cipher_mode_spec); strcpy(luks->header.hash_spec, hash_alg); - luks->header.key_bytes = qcrypto_cipher_get_key_len(luks_opts.cipher_alg); + luks->header.master_key_len = + qcrypto_cipher_get_key_len(luks_opts.cipher_alg); + if (luks_opts.cipher_mode == QCRYPTO_CIPHER_MODE_XTS) { - luks->header.key_bytes *= 2; + luks->header.master_key_len *= 2; } /* Generate the salt used for hashing the master key @@ -1008,9 +1328,9 @@ qcrypto_block_luks_create(QCryptoBlock *block, } /* Generate random master key */ - masterkey = g_new0(uint8_t, luks->header.key_bytes); + masterkey = g_new0(uint8_t, luks->header.master_key_len); if (qcrypto_random_bytes(masterkey, - luks->header.key_bytes, errp) < 0) { + luks->header.master_key_len, errp) < 0) { goto error; } @@ -1018,7 +1338,7 @@ qcrypto_block_luks_create(QCryptoBlock *block, /* Setup the block device payload encryption objects */ if (qcrypto_block_init_cipher(block, luks_opts.cipher_alg, luks_opts.cipher_mode, masterkey, - luks->header.key_bytes, 1, errp) < 0) { + luks->header.master_key_len, 1, errp) < 0) { goto error; } @@ -1026,9 +1346,9 @@ qcrypto_block_luks_create(QCryptoBlock *block, block->niv = qcrypto_cipher_get_iv_len(luks_opts.cipher_alg, luks_opts.cipher_mode); block->ivgen = qcrypto_ivgen_new(luks_opts.ivgen_alg, - ivcipheralg, + luks->ivgen_cipher_alg, luks_opts.ivgen_hash_alg, - masterkey, luks->header.key_bytes, + masterkey, luks->header.master_key_len, errp); if (!block->ivgen) { @@ -1040,7 +1360,7 @@ qcrypto_block_luks_create(QCryptoBlock *block, * key, in order to have 1 second of compute time used */ iters = qcrypto_pbkdf2_count_iters(luks_opts.hash_alg, - masterkey, luks->header.key_bytes, + masterkey, luks->header.master_key_len, luks->header.master_key_salt, QCRYPTO_BLOCK_LUKS_SALT_LEN, QCRYPTO_BLOCK_LUKS_DIGEST_LEN, @@ -1080,7 +1400,7 @@ qcrypto_block_luks_create(QCryptoBlock *block, * valid master key */ if (qcrypto_pbkdf2(luks_opts.hash_alg, - masterkey, luks->header.key_bytes, + masterkey, luks->header.master_key_len, luks->header.master_key_salt, QCRYPTO_BLOCK_LUKS_SALT_LEN, luks->header.master_key_iterations, @@ -1090,143 +1410,32 @@ qcrypto_block_luks_create(QCryptoBlock *block, goto error; } + /* start with the sector that follows the header*/ + header_sectors = QCRYPTO_BLOCK_LUKS_KEY_SLOT_OFFSET / + QCRYPTO_BLOCK_LUKS_SECTOR_SIZE; - /* Although LUKS has multiple key slots, we're just going - * to use the first key slot */ - splitkeylen = luks->header.key_bytes * QCRYPTO_BLOCK_LUKS_STRIPES; - for (i = 0; i < QCRYPTO_BLOCK_LUKS_NUM_KEY_SLOTS; i++) { - luks->header.key_slots[i].active = i == 0 ? 
- QCRYPTO_BLOCK_LUKS_KEY_SLOT_ENABLED : - QCRYPTO_BLOCK_LUKS_KEY_SLOT_DISABLED; - luks->header.key_slots[i].stripes = QCRYPTO_BLOCK_LUKS_STRIPES; - - /* This calculation doesn't match that shown in the spec, - * but instead follows the cryptsetup implementation. - */ - luks->header.key_slots[i].key_offset = - (QCRYPTO_BLOCK_LUKS_KEY_SLOT_OFFSET / - QCRYPTO_BLOCK_LUKS_SECTOR_SIZE) + - (ROUND_UP(DIV_ROUND_UP(splitkeylen, QCRYPTO_BLOCK_LUKS_SECTOR_SIZE), - (QCRYPTO_BLOCK_LUKS_KEY_SLOT_OFFSET / - QCRYPTO_BLOCK_LUKS_SECTOR_SIZE)) * i); - } - - if (qcrypto_random_bytes(luks->header.key_slots[0].salt, - QCRYPTO_BLOCK_LUKS_SALT_LEN, - errp) < 0) { - goto error; - } - - /* Again we determine how many iterations are required to - * hash the user password while consuming 1 second of compute - * time */ - iters = qcrypto_pbkdf2_count_iters(luks_opts.hash_alg, - (uint8_t *)password, strlen(password), - luks->header.key_slots[0].salt, - QCRYPTO_BLOCK_LUKS_SALT_LEN, - luks->header.key_bytes, - &local_err); - if (local_err) { - error_propagate(errp, local_err); - goto error; - } - - if (iters > (ULLONG_MAX / luks_opts.iter_time)) { - error_setg_errno(errp, ERANGE, - "PBKDF iterations %llu too large to scale", - (unsigned long long)iters); - goto error; - } - - /* iter_time was in millis, but count_iters reported for secs */ - iters = iters * luks_opts.iter_time / 1000; - - if (iters > UINT32_MAX) { - error_setg_errno(errp, ERANGE, - "PBKDF iterations %llu larger than %u", - (unsigned long long)iters, UINT32_MAX); - goto error; - } - - luks->header.key_slots[0].iterations = - MAX(iters, QCRYPTO_BLOCK_LUKS_MIN_SLOT_KEY_ITERS); - - - /* Generate a key that we'll use to encrypt the master - * key, from the user's password - */ - slotkey = g_new0(uint8_t, luks->header.key_bytes); - if (qcrypto_pbkdf2(luks_opts.hash_alg, - (uint8_t *)password, strlen(password), - luks->header.key_slots[0].salt, - QCRYPTO_BLOCK_LUKS_SALT_LEN, - luks->header.key_slots[0].iterations, - slotkey, luks->header.key_bytes, - errp) < 0) { - goto error; - } - - - /* Setup the encryption objects needed to encrypt the - * master key material - */ - cipher = qcrypto_cipher_new(luks_opts.cipher_alg, - luks_opts.cipher_mode, - slotkey, luks->header.key_bytes, - errp); - if (!cipher) { - goto error; - } - - ivgen = qcrypto_ivgen_new(luks_opts.ivgen_alg, - ivcipheralg, - luks_opts.ivgen_hash_alg, - slotkey, luks->header.key_bytes, - errp); - if (!ivgen) { - goto error; - } + split_key_sectors = + qcrypto_block_luks_splitkeylen_sectors(luks, + header_sectors, + QCRYPTO_BLOCK_LUKS_STRIPES); - /* Before storing the master key, we need to vastly - * increase its size, as protection against forensic - * disk data recovery */ - splitkey = g_new0(uint8_t, splitkeylen); - - if (qcrypto_afsplit_encode(luks_opts.hash_alg, - luks->header.key_bytes, - luks->header.key_slots[0].stripes, - masterkey, - splitkey, - errp) < 0) { - goto error; - } + for (i = 0; i < QCRYPTO_BLOCK_LUKS_NUM_KEY_SLOTS; i++) { + QCryptoBlockLUKSKeySlot *slot = &luks->header.key_slots[i]; + slot->active = QCRYPTO_BLOCK_LUKS_KEY_SLOT_DISABLED; - /* Now we encrypt the split master key with the key generated - * from the user's password, before storing it */ - if (qcrypto_block_cipher_encrypt_helper(cipher, block->niv, ivgen, - QCRYPTO_BLOCK_LUKS_SECTOR_SIZE, - 0, - splitkey, - splitkeylen, - errp) < 0) { - goto error; + slot->key_offset_sector = header_sectors + i * split_key_sectors; + slot->stripes = QCRYPTO_BLOCK_LUKS_STRIPES; } - /* The total size of the LUKS headers is the 
partition header + key * slot headers, rounded up to the nearest sector, combined with * the size of each master key material region, also rounded up * to the nearest sector */ - luks->header.payload_offset = - (QCRYPTO_BLOCK_LUKS_KEY_SLOT_OFFSET / - QCRYPTO_BLOCK_LUKS_SECTOR_SIZE) + - (ROUND_UP(DIV_ROUND_UP(splitkeylen, QCRYPTO_BLOCK_LUKS_SECTOR_SIZE), - (QCRYPTO_BLOCK_LUKS_KEY_SLOT_OFFSET / - QCRYPTO_BLOCK_LUKS_SECTOR_SIZE)) * - QCRYPTO_BLOCK_LUKS_NUM_KEY_SLOTS); + luks->header.payload_offset_sector = header_sectors + + QCRYPTO_BLOCK_LUKS_NUM_KEY_SLOTS * split_key_sectors; block->sector_size = QCRYPTO_BLOCK_LUKS_SECTOR_SIZE; - block->payload_offset = luks->header.payload_offset * + block->payload_offset = luks->header.payload_offset_sector * block->sector_size; /* Reserve header space to match payload offset */ @@ -1236,77 +1445,27 @@ qcrypto_block_luks_create(QCryptoBlock *block, goto error; } - /* Everything on disk uses Big Endian, so flip header fields - * before writing them */ - cpu_to_be16s(&luks->header.version); - cpu_to_be32s(&luks->header.payload_offset); - cpu_to_be32s(&luks->header.key_bytes); - cpu_to_be32s(&luks->header.master_key_iterations); - - for (i = 0; i < QCRYPTO_BLOCK_LUKS_NUM_KEY_SLOTS; i++) { - cpu_to_be32s(&luks->header.key_slots[i].active); - cpu_to_be32s(&luks->header.key_slots[i].iterations); - cpu_to_be32s(&luks->header.key_slots[i].key_offset); - cpu_to_be32s(&luks->header.key_slots[i].stripes); - } - - - /* Write out the partition header and key slot headers */ - writefunc(block, 0, - (const uint8_t *)&luks->header, - sizeof(luks->header), - opaque, - &local_err); - - /* Delay checking local_err until we've byte-swapped */ - - /* Byte swap the header back to native, in case we need - * to read it again later */ - be16_to_cpus(&luks->header.version); - be32_to_cpus(&luks->header.payload_offset); - be32_to_cpus(&luks->header.key_bytes); - be32_to_cpus(&luks->header.master_key_iterations); - for (i = 0; i < QCRYPTO_BLOCK_LUKS_NUM_KEY_SLOTS; i++) { - be32_to_cpus(&luks->header.key_slots[i].active); - be32_to_cpus(&luks->header.key_slots[i].iterations); - be32_to_cpus(&luks->header.key_slots[i].key_offset); - be32_to_cpus(&luks->header.key_slots[i].stripes); - } - - if (local_err) { - error_propagate(errp, local_err); + /* populate the slot 0 with the password encrypted master key*/ + /* This will also store the header */ + if (qcrypto_block_luks_store_key(block, + 0, + password, + masterkey, + luks_opts.iter_time, + writefunc, + opaque, + errp) < 0) { goto error; } - /* Write out the master key material, starting at the - * sector immediately following the partition header. 
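For the slot layout computed above, the numbers work out as follows for a hypothetical AES-256 XTS volume, assuming the usual LUKS1 constants (512-byte sectors, 4000 anti-forensic stripes, key material starting after 8 header sectors); these figures are illustrative only, the real values come from the header fields:

#include <stdio.h>

#define SECTOR             512
#define STRIPES            4000   /* QCRYPTO_BLOCK_LUKS_STRIPES */
#define NUM_SLOTS          8
#define HEADER_SECTORS     8      /* assumed: key material starts at sector 8 */
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
#define ROUND_UP(n, d)     (DIV_ROUND_UP(n, d) * (d))

int main(void)
{
    unsigned master_key_len = 64;                        /* AES-256 in XTS */
    unsigned splitkeylen = master_key_len * STRIPES;     /* 256000 bytes */
    unsigned split_key_sectors =
        ROUND_UP(DIV_ROUND_UP(splitkeylen, SECTOR), HEADER_SECTORS); /* 504 */

    for (unsigned i = 0; i < NUM_SLOTS; i++) {
        printf("slot %u key material at sector %u\n",
               i, HEADER_SECTORS + i * split_key_sectors);
    }
    /* 8 + 8 * 504 = 4040 sectors (~2 MiB) before the encrypted payload */
    printf("payload_offset_sector = %u\n",
           HEADER_SECTORS + NUM_SLOTS * split_key_sectors);
    return 0;
}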
*/ - if (writefunc(block, - luks->header.key_slots[0].key_offset * - QCRYPTO_BLOCK_LUKS_SECTOR_SIZE, - splitkey, splitkeylen, - opaque, - errp) != splitkeylen) { - goto error; - } - - luks->cipher_alg = luks_opts.cipher_alg; - luks->cipher_mode = luks_opts.cipher_mode; - luks->ivgen_alg = luks_opts.ivgen_alg; - luks->ivgen_hash_alg = luks_opts.ivgen_hash_alg; - luks->hash_alg = luks_opts.hash_alg; - - memset(masterkey, 0, luks->header.key_bytes); - memset(slotkey, 0, luks->header.key_bytes); + memset(masterkey, 0, luks->header.master_key_len); return 0; error: if (masterkey) { - memset(masterkey, 0, luks->header.key_bytes); - } - if (slotkey) { - memset(slotkey, 0, luks->header.key_bytes); + memset(masterkey, 0, luks->header.master_key_len); } qcrypto_block_free_cipher(block); @@ -1346,7 +1505,7 @@ static int qcrypto_block_luks_get_info(QCryptoBlock *block, slots->value = slot = g_new0(QCryptoBlockInfoLUKSSlot, 1); slot->active = luks->header.key_slots[i].active == QCRYPTO_BLOCK_LUKS_KEY_SLOT_ENABLED; - slot->key_offset = luks->header.key_slots[i].key_offset + slot->key_offset = luks->header.key_slots[i].key_offset_sector * QCRYPTO_BLOCK_LUKS_SECTOR_SIZE; if (slot->active) { slot->has_iters = true; diff --git a/hw/arm/boot.c b/hw/arm/boot.c index bf97ef3..c264864 100644 --- a/hw/arm/boot.c +++ b/hw/arm/boot.c @@ -575,7 +575,7 @@ int arm_load_dtb(hwaddr addr, const struct arm_boot_info *binfo, goto fail; } - if (scells < 2 && binfo->ram_size >= (1ULL << 32)) { + if (scells < 2 && binfo->ram_size >= 4 * GiB) { /* This is user error so deserves a friendlier error message * than the failure of setprop_sized_cells would provide */ @@ -754,6 +754,8 @@ static void do_cpu_reset(void *opaque) (cs != first_cpu || !info->secure_board_setup)) { /* Linux expects non-secure state */ env->cp15.scr_el3 |= SCR_NS; + /* Set NSACR.{CP11,CP10} so NS can access the FPU */ + env->cp15.nsacr |= 3 << 10; } } @@ -1095,7 +1097,7 @@ static void arm_setup_direct_kernel_boot(ARMCPU *cpu, * we might still make a bad choice here. */ info->initrd_start = info->loader_start + - MIN(info->ram_size / 2, 128 * 1024 * 1024); + MIN(info->ram_size / 2, 128 * MiB); if (image_high_addr) { info->initrd_start = MAX(info->initrd_start, image_high_addr); } @@ -1155,13 +1157,13 @@ static void arm_setup_direct_kernel_boot(ARMCPU *cpu, * * Let's play safe and prealign it to 2MB to give us some space. */ - align = 2 * 1024 * 1024; + align = 2 * MiB; } else { /* * Some 32bit kernels will trash anything in the 4K page the * initrd ends in, so make sure the DTB isn't caught up in that. */ - align = 4096; + align = 4 * KiB; } /* Place the DTB after the initrd in memory with alignment. 
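The DTB placement above just needs the first suitably aligned address after the initrd; a one-line sketch of that rounding, with the 2 MiB and 4 KiB values the hunk picks for 64-bit and 32-bit kernels respectively (align_up is a hypothetical helper, not QEMU's API):

#include <stdint.h>

#define KiB (1ULL << 10)
#define MiB (1ULL << 20)

/* Round addr up to the next multiple of align (a power of two), e.g.
 * align = 2 * MiB for 64-bit kernels, 4 * KiB for 32-bit ones. */
static uint64_t align_up(uint64_t addr, uint64_t align)
{
    return (addr + align - 1) & ~(align - 1);
}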
*/ @@ -1178,7 +1180,7 @@ static void arm_setup_direct_kernel_boot(ARMCPU *cpu, info->loader_start + KERNEL_ARGS_ADDR; fixupcontext[FIXUP_ARGPTR_HI] = (info->loader_start + KERNEL_ARGS_ADDR) >> 32; - if (info->ram_size >= (1ULL << 32)) { + if (info->ram_size >= 4 * GiB) { error_report("RAM size must be less than 4GB to boot" " Linux kernel using ATAGS (try passing a device tree" " using -dtb)"); diff --git a/hw/s390x/event-facility.c b/hw/s390x/event-facility.c index 797ecbb..6620569 100644 --- a/hw/s390x/event-facility.c +++ b/hw/s390x/event-facility.c @@ -377,9 +377,6 @@ static void command_handler(SCLPEventFacility *ef, SCCB *sccb, uint64_t code) case SCLP_CMD_WRITE_EVENT_MASK: write_event_mask(ef, sccb); break; - default: - sccb->h.response_code = cpu_to_be16(SCLP_RC_INVALID_SCLP_COMMAND); - break; } } diff --git a/hw/s390x/s390-pci-bus.c b/hw/s390x/s390-pci-bus.c index 963a41c..2d2f4a7 100644 --- a/hw/s390x/s390-pci-bus.c +++ b/hw/s390x/s390-pci-bus.c @@ -695,10 +695,15 @@ static const MemoryRegionOps s390_msi_ctrl_ops = { void s390_pci_iommu_enable(S390PCIIOMMU *iommu) { + /* + * The iommu region is initialized against a 0-mapped address space, + * so the smallest IOMMU region we can define runs from 0 to the end + * of the PCI address space. + */ char *name = g_strdup_printf("iommu-s390-%04x", iommu->pbdev->uid); memory_region_init_iommu(&iommu->iommu_mr, sizeof(iommu->iommu_mr), TYPE_S390_IOMMU_MEMORY_REGION, OBJECT(&iommu->mr), - name, iommu->pal - iommu->pba + 1); + name, iommu->pal + 1); iommu->enabled = true; memory_region_add_subregion(&iommu->mr, 0, MEMORY_REGION(&iommu->iommu_mr)); g_free(name); diff --git a/hw/s390x/s390-virtio-ccw.c b/hw/s390x/s390-virtio-ccw.c index 8bfb668..18ad279 100644 --- a/hw/s390x/s390-virtio-ccw.c +++ b/hw/s390x/s390-virtio-ccw.c @@ -154,39 +154,15 @@ static void virtio_ccw_register_hcalls(void) virtio_ccw_hcall_early_printk); } -/* - * KVM does only support memory slots up to KVM_MEM_MAX_NR_PAGES pages - * as the dirty bitmap must be managed by bitops that take an int as - * position indicator. If we have a guest beyond that we will split off - * new subregions. The split must happen on a segment boundary (1MB). - */ -#define KVM_MEM_MAX_NR_PAGES ((1ULL << 31) - 1) -#define SEG_MSK (~0xfffffULL) -#define KVM_SLOT_MAX_BYTES ((KVM_MEM_MAX_NR_PAGES * TARGET_PAGE_SIZE) & SEG_MSK) static void s390_memory_init(ram_addr_t mem_size) { MemoryRegion *sysmem = get_system_memory(); - ram_addr_t chunk, offset = 0; - unsigned int number = 0; + MemoryRegion *ram = g_new(MemoryRegion, 1); Error *local_err = NULL; - gchar *name; /* allocate RAM for core */ - name = g_strdup_printf("s390.ram"); - while (mem_size) { - MemoryRegion *ram = g_new(MemoryRegion, 1); - uint64_t size = mem_size; - - /* KVM does not allow memslots >= 8 TB */ - chunk = MIN(size, KVM_SLOT_MAX_BYTES); - memory_region_allocate_system_memory(ram, NULL, name, chunk); - memory_region_add_subregion(sysmem, offset, ram); - mem_size -= chunk; - offset += chunk; - g_free(name); - name = g_strdup_printf("s390.ram.%u", ++number); - } - g_free(name); + memory_region_allocate_system_memory(ram, NULL, "s390.ram", mem_size); + memory_region_add_subregion(sysmem, 0, ram); /* * Configure the maximum page size. 
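The s390-pci-bus.c change above is easier to see with numbers (the pba/pal aperture values below are hypothetical, chosen only for illustration): the IOMMU region is mapped at offset 0 of its container, so its size must reach the end of the aperture, not merely cover the aperture's length.

#include <stdint.h>
#include <assert.h>

int main(void)
{
    uint64_t pba = 0x40000000, pal = 0x7fffffff;   /* illustrative aperture */

    uint64_t old_size = pal - pba + 1;   /* 1 GiB: region ends at 0x3fffffff */
    uint64_t new_size = pal + 1;         /* 2 GiB: region reaches pal */

    assert(0 + old_size - 1 <  pal);     /* aperture end fell outside before */
    assert(0 + new_size - 1 == pal);     /* and is covered now */
    return 0;
}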
As no memory devices were created diff --git a/hw/s390x/sclp.c b/hw/s390x/sclp.c index fac7c3b..f57ce7b 100644 --- a/hw/s390x/sclp.c +++ b/hw/s390x/sclp.c @@ -68,6 +68,12 @@ static void read_SCP_info(SCLPDevice *sclp, SCCB *sccb) read_info->ibc_val = cpu_to_be32(s390_get_ibc_val()); + if (be16_to_cpu(sccb->h.length) < + (sizeof(ReadInfo) + cpu_count * sizeof(CPUEntry))) { + sccb->h.response_code = cpu_to_be16(SCLP_RC_INSUFFICIENT_SCCB_LENGTH); + return; + } + /* Configuration Characteristic (Extension) */ s390_get_feat_block(S390_FEAT_TYPE_SCLP_CONF_CHAR, read_info->conf_char); @@ -118,6 +124,12 @@ static void sclp_read_cpu_info(SCLPDevice *sclp, SCCB *sccb) cpu_info->offset_configured = cpu_to_be16(offsetof(ReadCpuInfo, entries)); cpu_info->nr_standby = cpu_to_be16(0); + if (be16_to_cpu(sccb->h.length) < + (sizeof(ReadCpuInfo) + cpu_count * sizeof(CPUEntry))) { + sccb->h.response_code = cpu_to_be16(SCLP_RC_INSUFFICIENT_SCCB_LENGTH); + return; + } + /* The standby offset is 16-byte for each CPU */ cpu_info->offset_standby = cpu_to_be16(cpu_info->offset_configured + cpu_info->nr_configured*sizeof(CPUEntry)); @@ -213,14 +225,33 @@ int sclp_service_call(CPUS390XState *env, uint64_t sccb, uint32_t code) cpu_physical_memory_read(sccb, &work_sccb, sccb_len); /* Valid sccb sizes */ - if (be16_to_cpu(work_sccb.h.length) < sizeof(SCCBHeader) || - be16_to_cpu(work_sccb.h.length) > SCCB_SIZE) { + if (be16_to_cpu(work_sccb.h.length) < sizeof(SCCBHeader)) { r = -PGM_SPECIFICATION; goto out; } - sclp_c->execute(sclp, &work_sccb, code); + switch (code & SCLP_CMD_CODE_MASK) { + case SCLP_CMDW_READ_SCP_INFO: + case SCLP_CMDW_READ_SCP_INFO_FORCED: + case SCLP_CMDW_READ_CPU_INFO: + case SCLP_CMDW_CONFIGURE_IOA: + case SCLP_CMDW_DECONFIGURE_IOA: + case SCLP_CMD_READ_EVENT_DATA: + case SCLP_CMD_WRITE_EVENT_DATA: + case SCLP_CMD_WRITE_EVENT_MASK: + break; + default: + work_sccb.h.response_code = cpu_to_be16(SCLP_RC_INVALID_SCLP_COMMAND); + goto out_write; + } + + if ((sccb + be16_to_cpu(work_sccb.h.length)) > ((sccb & PAGE_MASK) + PAGE_SIZE)) { + work_sccb.h.response_code = cpu_to_be16(SCLP_RC_SCCB_BOUNDARY_VIOLATION); + goto out_write; + } + sclp_c->execute(sclp, &work_sccb, code); +out_write: cpu_physical_memory_write(sccb, &work_sccb, be16_to_cpu(work_sccb.h.length)); diff --git a/include/sysemu/kvm_int.h b/include/sysemu/kvm_int.h index 72b2d1b..ac2d1f8 100644 --- a/include/sysemu/kvm_int.h +++ b/include/sysemu/kvm_int.h @@ -41,4 +41,5 @@ typedef struct KVMMemoryListener { void kvm_memory_listener_register(KVMState *s, KVMMemoryListener *kml, AddressSpace *as, int as_id); +void kvm_set_max_memslot_size(hwaddr max_slot_size); #endif diff --git a/linux-user/arm/cpu_loop.c b/linux-user/arm/cpu_loop.c index 8d65de5..e28c45c 100644 --- a/linux-user/arm/cpu_loop.c +++ b/linux-user/arm/cpu_loop.c @@ -325,9 +325,6 @@ void cpu_loop(CPUARMState *env) if (n == ARM_NR_cacheflush) { /* nop */ - } else if (n == ARM_NR_semihosting - || n == ARM_NR_thumb_semihosting) { - env->regs[0] = do_arm_semihosting (env); } else if (n == 0 || n >= ARM_SYSCALL_BASE || env->thumb) { /* linux syscall */ if (env->thumb || n == 0) { diff --git a/linux-user/arm/target_syscall.h b/linux-user/arm/target_syscall.h index afc0772..f85cbda 100644 --- a/linux-user/arm/target_syscall.h +++ b/linux-user/arm/target_syscall.h @@ -18,9 +18,6 @@ struct target_pt_regs { #define ARM_NR_set_tls (ARM_NR_BASE + 5) #define ARM_NR_get_tls (ARM_NR_BASE + 6) -#define ARM_NR_semihosting 0x123456 -#define ARM_NR_thumb_semihosting 0xAB - #if 
defined(TARGET_WORDS_BIGENDIAN) #define UNAME_MACHINE "armv5teb" #else diff --git a/target/arm/helper.c b/target/arm/helper.c index 507026c..0d9a2d2 100644 --- a/target/arm/helper.c +++ b/target/arm/helper.c @@ -6733,6 +6733,19 @@ void register_cp_regs_for_features(ARMCPU *cpu) } if (arm_feature(env, ARM_FEATURE_CBAR)) { + /* + * CBAR is IMPDEF, but common on Arm Cortex-A implementations. + * There are two flavours: + * (1) older 32-bit only cores have a simple 32-bit CBAR + * (2) 64-bit cores have a 64-bit CBAR visible to AArch64, plus a + * 32-bit register visible to AArch32 at a different encoding + * to the "flavour 1" register and with the bits rearranged to + * be able to squash a 64-bit address into the 32-bit view. + * We distinguish the two via the ARM_FEATURE_AARCH64 flag, but + * in future if we support AArch32-only configs of some of the + * AArch64 cores we might need to add a specific feature flag + * to indicate cores with "flavour 2" CBAR. + */ if (arm_feature(env, ARM_FEATURE_AARCH64)) { /* 32 bit view is [31:18] 0...0 [43:32]. */ uint32_t cbar32 = (extract64(cpu->reset_cbar, 18, 14) << 18) @@ -6740,12 +6753,12 @@ void register_cp_regs_for_features(ARMCPU *cpu) ARMCPRegInfo cbar_reginfo[] = { { .name = "CBAR", .type = ARM_CP_CONST, - .cp = 15, .crn = 15, .crm = 0, .opc1 = 4, .opc2 = 0, - .access = PL1_R, .resetvalue = cpu->reset_cbar }, + .cp = 15, .crn = 15, .crm = 3, .opc1 = 1, .opc2 = 0, + .access = PL1_R, .resetvalue = cbar32 }, { .name = "CBAR_EL1", .state = ARM_CP_STATE_AA64, .type = ARM_CP_CONST, .opc0 = 3, .opc1 = 1, .crn = 15, .crm = 3, .opc2 = 0, - .access = PL1_R, .resetvalue = cbar32 }, + .access = PL1_R, .resetvalue = cpu->reset_cbar }, REGINFO_SENTINEL }; /* We don't implement a r/w 64 bit CBAR currently */ @@ -8339,88 +8352,32 @@ static void arm_cpu_do_interrupt_aarch64(CPUState *cs) new_el, env->pc, pstate_read(env)); } -static inline bool check_for_semihosting(CPUState *cs) -{ +/* + * Do semihosting call and set the appropriate return value. All the + * permission and validity checks have been done at translate time. + * + * We only see semihosting exceptions in TCG only as they are not + * trapped to the hypervisor in KVM. + */ #ifdef CONFIG_TCG - /* Check whether this exception is a semihosting call; if so - * then handle it and return true; otherwise return false. - */ +static void handle_semihosting(CPUState *cs) +{ ARMCPU *cpu = ARM_CPU(cs); CPUARMState *env = &cpu->env; if (is_a64(env)) { - if (cs->exception_index == EXCP_SEMIHOST) { - /* This is always the 64-bit semihosting exception. - * The "is this usermode" and "is semihosting enabled" - * checks have been done at translate time. - */ - qemu_log_mask(CPU_LOG_INT, - "...handling as semihosting call 0x%" PRIx64 "\n", - env->xregs[0]); - env->xregs[0] = do_arm_semihosting(env); - return true; - } - return false; + qemu_log_mask(CPU_LOG_INT, + "...handling as semihosting call 0x%" PRIx64 "\n", + env->xregs[0]); + env->xregs[0] = do_arm_semihosting(env); } else { - uint32_t imm; - - /* Only intercept calls from privileged modes, to provide some - * semblance of security. - */ - if (cs->exception_index != EXCP_SEMIHOST && - (!semihosting_enabled() || - ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_USR))) { - return false; - } - - switch (cs->exception_index) { - case EXCP_SEMIHOST: - /* This is always a semihosting call; the "is this usermode" - * and "is semihosting enabled" checks have been done at - * translate time. - */ - break; - case EXCP_SWI: - /* Check for semihosting interrupt. 
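The removed interrupt-time check above recognised a semihosting call by re-reading the trapping instruction's immediate; the patch moves that decision to translate time (trans_SVC/trans_BKPT further down), but the magic immediates stay the same. A sketch of the recognition itself (svc_is_semihosting is a hypothetical helper name):

#include <stdbool.h>
#include <stdint.h>

/* Given the raw SVC instruction, decide whether it used the semihosting
 * immediate.  Thumb "svc 0xab" carries an 8-bit immediate; A32
 * "svc 0x123456" carries a 24-bit one. */
static bool svc_is_semihosting(uint32_t insn, bool thumb)
{
    uint32_t imm = thumb ? (insn & 0xff) : (insn & 0xffffff);
    return thumb ? (imm == 0xab) : (imm == 0x123456);
}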
@@ -8339,88 +8352,32 @@ static void arm_cpu_do_interrupt_aarch64(CPUState *cs)
                   new_el, env->pc, pstate_read(env));
 }
 
-static inline bool check_for_semihosting(CPUState *cs)
-{
+/*
+ * Do semihosting call and set the appropriate return value. All the
+ * permission and validity checks have been done at translate time.
+ *
+ * We only see semihosting exceptions in TCG only as they are not
+ * trapped to the hypervisor in KVM.
+ */
 #ifdef CONFIG_TCG
-    /* Check whether this exception is a semihosting call; if so
-     * then handle it and return true; otherwise return false.
-     */
+static void handle_semihosting(CPUState *cs)
+{
     ARMCPU *cpu = ARM_CPU(cs);
     CPUARMState *env = &cpu->env;
 
     if (is_a64(env)) {
-        if (cs->exception_index == EXCP_SEMIHOST) {
-            /* This is always the 64-bit semihosting exception.
-             * The "is this usermode" and "is semihosting enabled"
-             * checks have been done at translate time.
-             */
-            qemu_log_mask(CPU_LOG_INT,
-                          "...handling as semihosting call 0x%" PRIx64 "\n",
-                          env->xregs[0]);
-            env->xregs[0] = do_arm_semihosting(env);
-            return true;
-        }
-        return false;
+        qemu_log_mask(CPU_LOG_INT,
+                      "...handling as semihosting call 0x%" PRIx64 "\n",
+                      env->xregs[0]);
+        env->xregs[0] = do_arm_semihosting(env);
     } else {
-        uint32_t imm;
-
-        /* Only intercept calls from privileged modes, to provide some
-         * semblance of security.
-         */
-        if (cs->exception_index != EXCP_SEMIHOST &&
-            (!semihosting_enabled() ||
-             ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_USR))) {
-            return false;
-        }
-
-        switch (cs->exception_index) {
-        case EXCP_SEMIHOST:
-            /* This is always a semihosting call; the "is this usermode"
-             * and "is semihosting enabled" checks have been done at
-             * translate time.
-             */
-            break;
-        case EXCP_SWI:
-            /* Check for semihosting interrupt. */
-            if (env->thumb) {
-                imm = arm_lduw_code(env, env->regs[15] - 2, arm_sctlr_b(env))
-                    & 0xff;
-                if (imm == 0xab) {
-                    break;
-                }
-            } else {
-                imm = arm_ldl_code(env, env->regs[15] - 4, arm_sctlr_b(env))
-                    & 0xffffff;
-                if (imm == 0x123456) {
-                    break;
-                }
-            }
-            return false;
-        case EXCP_BKPT:
-            /* See if this is a semihosting syscall. */
-            if (env->thumb) {
-                imm = arm_lduw_code(env, env->regs[15], arm_sctlr_b(env))
-                    & 0xff;
-                if (imm == 0xab) {
-                    env->regs[15] += 2;
-                    break;
-                }
-            }
-            return false;
-        default:
-            return false;
-        }
-
         qemu_log_mask(CPU_LOG_INT,
                       "...handling as semihosting call 0x%x\n",
                       env->regs[0]);
         env->regs[0] = do_arm_semihosting(env);
-        return true;
     }
-#else
-    return false;
-#endif
 }
+#endif
 
 /* Handle a CPU exception for A and R profile CPUs.
  * Do any appropriate logging, handle PSCI calls, and then hand off
@@ -8451,13 +8408,17 @@ void arm_cpu_do_interrupt(CPUState *cs)
         return;
     }
 
-    /* Semihosting semantics depend on the register width of the
-     * code that caused the exception, not the target exception level,
-     * so must be handled here.
+    /*
+     * Semihosting semantics depend on the register width of the code
+     * that caused the exception, not the target exception level, so
+     * must be handled here.
      */
-    if (check_for_semihosting(cs)) {
+#ifdef CONFIG_TCG
+    if (cs->exception_index == EXCP_SEMIHOST) {
+        handle_semihosting(cs);
         return;
     }
+#endif
 
     /* Hooks may change global state so BQL should be held, also the
      * BQL needs to be held for any modification of
diff --git a/target/arm/m_helper.c b/target/arm/m_helper.c
index 884d35d..27cd2f3 100644
--- a/target/arm/m_helper.c
+++ b/target/arm/m_helper.c
@@ -2114,19 +2114,13 @@ void arm_v7m_cpu_do_interrupt(CPUState *cs)
             break;
         }
         break;
+    case EXCP_SEMIHOST:
+        qemu_log_mask(CPU_LOG_INT,
+                      "...handling as semihosting call 0x%x\n",
+                      env->regs[0]);
+        env->regs[0] = do_arm_semihosting(env);
+        return;
     case EXCP_BKPT:
-        if (semihosting_enabled()) {
-            int nr;
-            nr = arm_lduw_code(env, env->regs[15], arm_sctlr_b(env)) & 0xff;
-            if (nr == 0xab) {
-                env->regs[15] += 2;
-                qemu_log_mask(CPU_LOG_INT,
-                              "...handling as semihosting call 0x%x\n",
-                              env->regs[0]);
-                env->regs[0] = do_arm_semihosting(env);
-                return;
-            }
-        }
         armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_DEBUG, false);
         break;
     case EXCP_IRQ:
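All of the code removed from helper.c and m_helper.c above did the same job at exception time: fetch the SVC or BKPT instruction back out of guest memory and compare its immediate against the semihosting convention, 0xab for Thumb and 0x123456 for A32 SVC. The translate.c hunks that follow do that comparison once at translation time and raise EXCP_SEMIHOST instead. As a minimal standalone restatement of just the immediate convention (the function is illustrative and not QEMU code; the masks and magic values come from the removed check, and the two instruction words in main() are ordinary T32/A32 SVC encodings used only as test inputs):

#include <stdbool.h>
#include <stdint.h>

/*
 * Immediates that mark an SVC as a semihosting request:
 * Thumb "svc 0xab" versus A32 "svc 0x123456".
 */
static bool svc_imm_is_semihosting(uint32_t insn, bool thumb)
{
    if (thumb) {
        return (insn & 0xff) == 0xab;         /* T32 SVC: 8-bit immediate */
    }
    return (insn & 0xffffff) == 0x123456;     /* A32 SVC: 24-bit immediate */
}

int main(void)
{
    return svc_imm_is_semihosting(0xdfab, true) &&      /* T32 svc 0xab */
           svc_imm_is_semihosting(0xef123456, false)    /* A32 svc 0x123456 */
               ? 0 : 1;
}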
diff --git a/target/arm/translate.c b/target/arm/translate.c
index 34bb280..698c594 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -8424,7 +8424,16 @@ static bool trans_BKPT(DisasContext *s, arg_BKPT *a)
     if (!ENABLE_ARCH_5) {
         return false;
     }
-    gen_exception_bkpt_insn(s, syn_aa32_bkpt(a->imm, false));
+    if (arm_dc_feature(s, ARM_FEATURE_M) &&
+        semihosting_enabled() &&
+#ifndef CONFIG_USER_ONLY
+        !IS_USER(s) &&
+#endif
+        (a->imm == 0xab)) {
+        gen_exception_internal_insn(s, s->base.pc_next, EXCP_SEMIHOST);
+    } else {
+        gen_exception_bkpt_insn(s, syn_aa32_bkpt(a->imm, false));
+    }
     return true;
 }
 
@@ -10213,14 +10222,25 @@ static bool trans_CBZ(DisasContext *s, arg_CBZ *a)
 }
 
 /*
- * Supervisor call
+ * Supervisor call - both T32 & A32 come here so we need to check
+ * which mode we are in when checking for semihosting.
  */
 static bool trans_SVC(DisasContext *s, arg_SVC *a)
 {
-    gen_set_pc_im(s, s->base.pc_next);
-    s->svc_imm = a->imm;
-    s->base.is_jmp = DISAS_SWI;
+    const uint32_t semihost_imm = s->thumb ? 0xab : 0x123456;
+
+    if (!arm_dc_feature(s, ARM_FEATURE_M) && semihosting_enabled() &&
+#ifndef CONFIG_USER_ONLY
+        !IS_USER(s) &&
+#endif
+        (a->imm == semihost_imm)) {
+        gen_exception_internal_insn(s, s->base.pc_next, EXCP_SEMIHOST);
+    } else {
+        gen_set_pc_im(s, s->base.pc_next);
+        s->svc_imm = a->imm;
+        s->base.is_jmp = DISAS_SWI;
+    }
     return true;
 }
diff --git a/target/s390x/kvm.c b/target/s390x/kvm.c
index 97a662a..c24c869 100644
--- a/target/s390x/kvm.c
+++ b/target/s390x/kvm.c
@@ -28,6 +28,7 @@
 #include "cpu.h"
 #include "internal.h"
 #include "kvm_s390x.h"
+#include "sysemu/kvm_int.h"
 #include "qapi/error.h"
 #include "qemu/error-report.h"
 #include "qemu/timer.h"
@@ -122,6 +123,14 @@
  */
 #define VCPU_IRQ_BUF_SIZE(max_cpus) (sizeof(struct kvm_s390_irq) * \
                                      (max_cpus + NR_LOCAL_IRQS))
+/*
+ * KVM does only support memory slots up to KVM_MEM_MAX_NR_PAGES pages
+ * as the dirty bitmap must be managed by bitops that take an int as
+ * position indicator. This would end at an unaligned address
+ * (0x7fffff00000). As future variants might provide larger pages
+ * and to make all addresses properly aligned, let us split at 4TB.
+ */
+#define KVM_SLOT_MAX_BYTES (4UL * TiB)
 
 static CPUWatchpoint hw_watchpoint;
 /*
@@ -355,6 +364,7 @@ int kvm_arch_init(MachineState *ms, KVMState *s)
      */
     /* kvm_vm_enable_cap(s, KVM_CAP_S390_AIS, 0); */
 
+    kvm_set_max_memslot_size(KVM_SLOT_MAX_BYTES);
     return 0;
 }
diff --git a/tests/tcg/aarch64/Makefile.target b/tests/tcg/aarch64/Makefile.target
index 9758f89..509f1af 100644
--- a/tests/tcg/aarch64/Makefile.target
+++ b/tests/tcg/aarch64/Makefile.target
@@ -21,4 +21,9 @@ run-fcvt: fcvt
 AARCH64_TESTS += pauth-1 pauth-2
 run-pauth-%: QEMU_OPTS += -cpu max
 
+# Semihosting smoke test for linux-user
+AARCH64_TESTS += semihosting
+run-semihosting: semihosting
+	$(call run-test,$<,$(QEMU) $< 2> $<.err, "$< on $(TARGET_NAME)")
+
 TESTS += $(AARCH64_TESTS)
diff --git a/tests/tcg/arm/Makefile.target b/tests/tcg/arm/Makefile.target
index 9f43d98..3ddff85 100644
--- a/tests/tcg/arm/Makefile.target
+++ b/tests/tcg/arm/Makefile.target
@@ -8,7 +8,6 @@ ARM_SRC=$(SRC_PATH)/tests/tcg/arm
 # Set search path for all sources
 VPATH += $(ARM_SRC)
 
-# Multiarch Tests
 float_madds: CFLAGS+=-mfpu=neon-vfpv4
 
 # Basic Hello World
@@ -30,6 +29,11 @@ run-fcvt: fcvt
 	$(call run-test,fcvt,$(QEMU) $<,"$< on $(TARGET_NAME)")
 	$(call diff-out,fcvt,$(ARM_SRC)/fcvt.ref)
 
+# Semihosting smoke test for linux-user
+ARM_TESTS += semihosting
+run-semihosting: semihosting
+	$(call run-test,$<,$(QEMU) $< 2> $<.err, "$< on $(TARGET_NAME)")
+
 TESTS += $(ARM_TESTS)
 
 # On ARM Linux only supports 4k pages
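On the s390x side, the new KVM_SLOT_MAX_BYTES cap means a guest memory region larger than 4 TiB has to be handed to KVM as several slots rather than one, which is what the kvm_set_max_memslot_size() call in kvm_arch_init() arranges. The following is only an arithmetic illustration of that 4 TiB chunking, not the QEMU listener code; the helper name and the 12 TiB example size are invented:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define TiB                 (1024ULL * 1024 * 1024 * 1024)
#define KVM_SLOT_MAX_BYTES  (4 * TiB)   /* mirrors the cap set via kvm_set_max_memslot_size() */

#define MIN(a, b) ((a) < (b) ? (a) : (b))

/* Print the <= 4 TiB chunks a region of the given size would need. */
static void split_region(uint64_t start, uint64_t size)
{
    while (size) {
        uint64_t chunk = MIN(KVM_SLOT_MAX_BYTES, size);
        printf("slot: 0x%016" PRIx64 " + 0x%" PRIx64 "\n", start, chunk);
        start += chunk;
        size -= chunk;
    }
}

int main(void)
{
    split_region(0, 12 * TiB);   /* a hypothetical 12 TiB guest needs 3 slots */
    return 0;
}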
diff --git a/tests/tcg/arm/semihosting.c b/tests/tcg/arm/semihosting.c
new file mode 100644
index 0000000..09c89cb
--- /dev/null
+++ b/tests/tcg/arm/semihosting.c
@@ -0,0 +1,45 @@
+/*
+ * linux-user semihosting checks
+ *
+ * Copyright (c) 2019
+ * Written by Alex Bennée <alex.bennee@linaro.org>
+ *
+ * SPDX-License-Identifier: GPL-3.0-or-later
+ */
+
+#include <stdint.h>
+
+#define SYS_WRITE0    0x04
+#define SYS_REPORTEXC 0x18
+
+void __semi_call(uintptr_t type, uintptr_t arg0)
+{
+#if defined(__arm__)
+    register uintptr_t t asm("r0") = type;
+    register uintptr_t a0 asm("r1") = arg0;
+    asm("svc 0xab"
+        : /* no return */
+        : "r" (t), "r" (a0));
+#else
+    register uintptr_t t asm("x0") = type;
+    register uintptr_t a0 asm("x1") = arg0;
+    asm("hlt 0xf000"
+        : /* no return */
+        : "r" (t), "r" (a0));
+#endif
+}
+
+int main(int argc, char *argv[argc])
+{
+#if defined(__arm__)
+    uintptr_t exit_code = 0x20026;
+#else
+    uintptr_t exit_block[2] = {0x20026, 0};
+    uintptr_t exit_code = (uintptr_t) &exit_block;
+#endif
+
+    __semi_call(SYS_WRITE0, (uintptr_t) "Hello World");
+    __semi_call(SYS_REPORTEXC, exit_code);
+    /* if we get here we failed */
+    return -1;
+}
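The 0x20026 passed to SYS_REPORTEXC is the ADP_Stopped_ApplicationExit reason code, which is why a successful run terminates through the semihosting exit path instead of reaching the trailing return -1. The test's __semi_call() deliberately ignores the call's result; if a later test wanted the value the host returns in r0/x0 for calls that produce one, a variant along these lines should work. This is a hedged sketch for illustration only, not part of the patch, and assumes the usual semihosting convention of returning the result in r0/x0:

#include <stdint.h>

static uintptr_t semi_call_ret(uintptr_t type, uintptr_t arg0)
{
#if defined(__arm__)
    register uintptr_t t asm("r0") = type;
    register uintptr_t a0 asm("r1") = arg0;
    asm volatile("svc 0xab"
                 : "+r" (t)          /* result comes back in r0 */
                 : "r" (a0)
                 : "memory");
#else
    register uintptr_t t asm("x0") = type;
    register uintptr_t a0 asm("x1") = arg0;
    asm volatile("hlt 0xf000"
                 : "+r" (t)          /* result comes back in x0 */
                 : "r" (a0)
                 : "memory");
#endif
    return t;
}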