path: root/include
author	Richard Henderson <richard.henderson@linaro.org>	2024-05-23 22:09:59 -0700
committer	Richard Henderson <richard.henderson@linaro.org>	2024-05-23 22:09:59 -0700
commit	ffdd099a782556b9ead26551a6f1d070a595306d (patch)
tree	e5a84eb7a1c9ec0f6ee8957c4c076e2b05297c51 /include
parent	70581940cabcc51b329652becddfbc6a261b1b83 (diff)
parent	e48fb4c590a23d81ee1d2f09ee9bcf5dd5f98e43 (diff)
Merge tag 'pull-ppc-for-9.1-1-20240524-1' of https://gitlab.com/npiggin/qemu into staging
*** NOTE *** This replaces the previous PR for tags/pull-ppc-for-9.1-1-20240524

* Fix an interesting TLB invalidate race
* Implement more instructions with decodetree
* Add the POWER8/9/10 BHRB facility
* Add missing instructions, registers, SMT support
* First round of a big MMU xlate cleanup

# -----BEGIN PGP SIGNATURE-----
#
# iQIzBAABCgAdFiEETkN92lZhb0MpsKeVZ7MCdqhiHK4FAmZP1bsACgkQZ7MCdqhi
# HK7TuQ/7BQugpF2yOYroQmo0Yl4RPfFp6ACqfYQgehcGegg3SWpEselTeOJla3G9
# UyVd0mlWf7DciYi61qit/WyLOeuRXMtRjrnFLV2wz9o7D/Ey5/aLQfUL4oCDt/i2
# hmmq3ZAcr7WWxaz338pLJx9gIVjaNiqSoRz9HgHNkQq0pxkbEo1eSjZ6QLSvqYC2
# dwtJHywFrHNo14aq1Nc7PZ5MFxNN6t7hm7KRHKFrt8Obar15n64MSHyRvMzHI9EO
# RgNzz9/qe5yvJ4kmaNiZjntxojXCBUhhlCTtaDIG1LDBc2yNG5VWQUnwThvyNxxX
# h+Ia4Pv7blXikQ6RuqsvFyrLCgUvwXwBiQwiQCJyITk0asLyJVwhkUpiI/jJvOun
# AujSA/6e2pbSe4RUZytkzygx2KVODrVtcSoOvo8kRw+2aTOWMv7DbfBalmWJQWgx
# 0xSeuUz22eNKEL2XbZWNM5v0OgXUXIs9BVeCqn7RB4lC2RNi72v111UPuKYq6Ijx
# SHWQMGPGu9FNBsIdriclRWXVXHpVHz/s/l8AJT8ad6E57UHVk5zCPrbFZFImvQkL
# E7xlctijeST8V5qGyBPG3M4aPoER9+6J32ORSx7KwDwr+fzkbNUXC8UUC4OjAZ+d
# 2vhie9Vs5xWq/E8gGovTymeQ4yHArobDz/j7+rrr0qeppnKLWjM=
# =jHL7
# -----END PGP SIGNATURE-----
# gpg: Signature made Thu 23 May 2024 04:48:11 PM PDT
# gpg:                using RSA key 4E437DDA56616F4329B0A79567B30276A8621CAE
# gpg: Good signature from "Nicholas Piggin <npiggin@gmail.com>" [unknown]
# gpg: WARNING: This key is not certified with a trusted signature!
# gpg:          There is no indication that the signature belongs to the owner.
# Primary key fingerprint: 4E43 7DDA 5661 6F43 29B0 A795 67B3 0276 A862 1CAE

* tag 'pull-ppc-for-9.1-1-20240524-1' of https://gitlab.com/npiggin/qemu: (72 commits)
  target/ppc: Remove pp_check() and reuse ppc_hash32_pp_prot()
  target/ppc: Move out BookE and related MMU functions from mmu_common.c
  target/ppc: Add a function to check for page protection bit
  target/ppc/mmu-radix64.c: Drop a local variable
  target/ppc/mmu-hash32.c: Drop a local variable
  target/ppc: Split off common embedded TLB init
  target/ppc: Remove id_tlbs flag from CPU env
  target/ppc: Move mmu_ctx_t type to mmu_common.c
  target/ppc: Transform ppc_jumbo_xlate() into ppc_6xx_xlate()
  target/ppc: Split off 40x cases from ppc_jumbo_xlate()
  target/ppc: Split off real mode handling from get_physical_address_wtlb()
  target/ppc: Simplify ppc_booke_xlate() part 2
  target/ppc: Simplify ppc_booke_xlate() part 1
  target/ppc: Split off BookE handling from ppc_jumbo_xlate()
  target/ppc: Remove BookE from direct store handling
  target/ppc: Don't use mmu_ctx_t in mmubooke206_get_physical_address()
  target/ppc: Don't use mmu_ctx_t in mmubooke_get_physical_address()
  target/ppc: Don't use mmu_ctx_t for mmu40x_get_physical_address()
  target/ppc: Replace hard coded constants in ppc_jumbo_xlate()
  target/ppc: Deindent ppc_jumbo_xlate()
  ...

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Diffstat (limited to 'include')
-rw-r--r--  include/exec/exec-all.h  97
1 file changed, 15 insertions, 82 deletions
diff --git a/include/exec/exec-all.h b/include/exec/exec-all.h
index 2cd7b8f..b6b46ad 100644
--- a/include/exec/exec-all.h
+++ b/include/exec/exec-all.h
@@ -68,24 +68,15 @@ void tlb_destroy(CPUState *cpu);
*/
void tlb_flush_page(CPUState *cpu, vaddr addr);
/**
- * tlb_flush_page_all_cpus:
+ * tlb_flush_page_all_cpus_synced:
* @cpu: src CPU of the flush
* @addr: virtual address of page to be flushed
*
- * Flush one page from the TLB of the specified CPU, for all
+ * Flush one page from the TLB of all CPUs, for all
* MMU indexes.
- */
-void tlb_flush_page_all_cpus(CPUState *src, vaddr addr);
-/**
- * tlb_flush_page_all_cpus_synced:
- * @cpu: src CPU of the flush
- * @addr: virtual address of page to be flushed
*
- * Flush one page from the TLB of the specified CPU, for all MMU
- * indexes like tlb_flush_page_all_cpus except the source vCPUs work
- * is scheduled as safe work meaning all flushes will be complete once
- * the source vCPUs safe work is complete. This will depend on when
- * the guests translation ends the TB.
+ * When this function returns, no CPUs will subsequently perform
+ * translations using the flushed TLBs.
*/
void tlb_flush_page_all_cpus_synced(CPUState *src, vaddr addr);
/**
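
The _synced variant is now the only broadcast form of the page flush. A minimal
sketch of a call site, assuming a hypothetical target helper for a guest
"invalidate TLB entry" operation (the helper name and target are illustrative,
not part of this patch):

#include "qemu/osdep.h"
#include "exec/exec-all.h"

/* Hypothetical target helper: broadcast-invalidate one guest page. */
void my_target_tlb_invalidate_page(CPUState *cs, vaddr addr)
{
    /*
     * The flush is queued as safe work; once it has run, no vCPU can
     * translate through the flushed entry in any MMU index.
     */
    tlb_flush_page_all_cpus_synced(cs, addr);
}
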
@@ -99,18 +90,13 @@ void tlb_flush_page_all_cpus_synced(CPUState *src, vaddr addr);
*/
void tlb_flush(CPUState *cpu);
/**
- * tlb_flush_all_cpus:
- * @cpu: src CPU of the flush
- */
-void tlb_flush_all_cpus(CPUState *src_cpu);
-/**
* tlb_flush_all_cpus_synced:
* @cpu: src CPU of the flush
*
- * Like tlb_flush_all_cpus except this except the source vCPUs work is
- * scheduled as safe work meaning all flushes will be complete once
- * the source vCPUs safe work is complete. This will depend on when
- * the guests translation ends the TB.
+ * Flush the entire TLB for all CPUs, for all MMU indexes.
+ *
+ * When this function returns, no CPUs will subsequently perform
+ * translations using the flushed TLBs.
*/
void tlb_flush_all_cpus_synced(CPUState *src_cpu);
/**
@@ -125,27 +111,16 @@ void tlb_flush_all_cpus_synced(CPUState *src_cpu);
void tlb_flush_page_by_mmuidx(CPUState *cpu, vaddr addr,
uint16_t idxmap);
/**
- * tlb_flush_page_by_mmuidx_all_cpus:
+ * tlb_flush_page_by_mmuidx_all_cpus_synced:
* @cpu: Originating CPU of the flush
* @addr: virtual address of page to be flushed
* @idxmap: bitmap of MMU indexes to flush
*
* Flush one page from the TLB of all CPUs, for the specified
* MMU indexes.
- */
-void tlb_flush_page_by_mmuidx_all_cpus(CPUState *cpu, vaddr addr,
- uint16_t idxmap);
-/**
- * tlb_flush_page_by_mmuidx_all_cpus_synced:
- * @cpu: Originating CPU of the flush
- * @addr: virtual address of page to be flushed
- * @idxmap: bitmap of MMU indexes to flush
*
- * Flush one page from the TLB of all CPUs, for the specified MMU
- * indexes like tlb_flush_page_by_mmuidx_all_cpus except the source
- * vCPUs work is scheduled as safe work meaning all flushes will be
- * complete once the source vCPUs safe work is complete. This will
- * depend on when the guests translation ends the TB.
+ * When this function returns, no CPUs will subsequently perform
+ * translations using the flushed TLBs.
*/
void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *cpu, vaddr addr,
uint16_t idxmap);
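
For the by-mmuidx variants, @idxmap carries one bit per softmmu MMU index. A
sketch, assuming two illustrative index numbers (real values are defined per
target, not by this header):

#include "exec/exec-all.h"

/* Illustrative MMU index numbers; every target defines its own. */
#define MY_MMU_USER_IDX   0
#define MY_MMU_KERNEL_IDX 1

void my_flush_page_user_and_kernel(CPUState *cs, vaddr addr)
{
    uint16_t idxmap = (1u << MY_MMU_USER_IDX) | (1u << MY_MMU_KERNEL_IDX);

    /* Flush @addr from the selected MMU indexes on every CPU. */
    tlb_flush_page_by_mmuidx_all_cpus_synced(cs, addr, idxmap);
}
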
@@ -160,24 +135,15 @@ void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *cpu, vaddr addr,
*/
void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap);
/**
- * tlb_flush_by_mmuidx_all_cpus:
+ * tlb_flush_by_mmuidx_all_cpus_synced:
* @cpu: Originating CPU of the flush
* @idxmap: bitmap of MMU indexes to flush
*
- * Flush all entries from all TLBs of all CPUs, for the specified
+ * Flush all entries from the TLB of all CPUs, for the specified
* MMU indexes.
- */
-void tlb_flush_by_mmuidx_all_cpus(CPUState *cpu, uint16_t idxmap);
-/**
- * tlb_flush_by_mmuidx_all_cpus_synced:
- * @cpu: Originating CPU of the flush
- * @idxmap: bitmap of MMU indexes to flush
*
- * Flush all entries from all TLBs of all CPUs, for the specified
- * MMU indexes like tlb_flush_by_mmuidx_all_cpus except except the source
- * vCPUs work is scheduled as safe work meaning all flushes will be
- * complete once the source vCPUs safe work is complete. This will
- * depend on when the guests translation ends the TB.
+ * When this function returns, no CPUs will subsequently perform
+ * translations using the flushed TLBs.
*/
void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *cpu, uint16_t idxmap);
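
The whole-TLB form with an idxmap suits a guest-visible context change, such
as an address-space switch. A sketch reusing the illustrative MY_MMU_USER_IDX
from the previous example:

/*
 * Hypothetical: a guest address-space switch drops all user-mode
 * translations on every CPU, leaving other MMU indexes intact.
 */
void my_switch_address_space(CPUState *cs)
{
    tlb_flush_by_mmuidx_all_cpus_synced(cs, 1u << MY_MMU_USER_IDX);
}
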
@@ -194,8 +160,6 @@ void tlb_flush_page_bits_by_mmuidx(CPUState *cpu, vaddr addr,
uint16_t idxmap, unsigned bits);
/* Similarly, with broadcast and syncing. */
-void tlb_flush_page_bits_by_mmuidx_all_cpus(CPUState *cpu, vaddr addr,
- uint16_t idxmap, unsigned bits);
void tlb_flush_page_bits_by_mmuidx_all_cpus_synced
(CPUState *cpu, vaddr addr, uint16_t idxmap, unsigned bits);
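
Here @bits gives the number of low virtual-address bits that participate in
the match against @addr, which helps targets whose translation regime ignores
high address bits. A sketch, assuming 56 significant bits purely for
illustration:

/*
 * Hypothetical: only the low 56 address bits are significant, so any
 * entry matching @addr in those bits is flushed on every CPU, for the
 * MMU indexes selected by @idxmap.
 */
void my_flush_page_ignoring_top_byte(CPUState *cs, vaddr addr,
                                     uint16_t idxmap)
{
    tlb_flush_page_bits_by_mmuidx_all_cpus_synced(cs, addr, idxmap, 56);
}
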
@@ -215,9 +179,6 @@ void tlb_flush_range_by_mmuidx(CPUState *cpu, vaddr addr,
unsigned bits);
/* Similarly, with broadcast and syncing. */
-void tlb_flush_range_by_mmuidx_all_cpus(CPUState *cpu, vaddr addr,
- vaddr len, uint16_t idxmap,
- unsigned bits);
void tlb_flush_range_by_mmuidx_all_cpus_synced(CPUState *cpu,
vaddr addr,
vaddr len,
@@ -290,18 +251,12 @@ static inline void tlb_destroy(CPUState *cpu)
static inline void tlb_flush_page(CPUState *cpu, vaddr addr)
{
}
-static inline void tlb_flush_page_all_cpus(CPUState *src, vaddr addr)
-{
-}
static inline void tlb_flush_page_all_cpus_synced(CPUState *src, vaddr addr)
{
}
static inline void tlb_flush(CPUState *cpu)
{
}
-static inline void tlb_flush_all_cpus(CPUState *src_cpu)
-{
-}
static inline void tlb_flush_all_cpus_synced(CPUState *src_cpu)
{
}
@@ -313,20 +268,11 @@ static inline void tlb_flush_page_by_mmuidx(CPUState *cpu,
static inline void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap)
{
}
-static inline void tlb_flush_page_by_mmuidx_all_cpus(CPUState *cpu,
- vaddr addr,
- uint16_t idxmap)
-{
-}
static inline void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *cpu,
vaddr addr,
uint16_t idxmap)
{
}
-static inline void tlb_flush_by_mmuidx_all_cpus(CPUState *cpu, uint16_t idxmap)
-{
-}
-
static inline void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *cpu,
uint16_t idxmap)
{
@@ -337,12 +283,6 @@ static inline void tlb_flush_page_bits_by_mmuidx(CPUState *cpu,
unsigned bits)
{
}
-static inline void tlb_flush_page_bits_by_mmuidx_all_cpus(CPUState *cpu,
- vaddr addr,
- uint16_t idxmap,
- unsigned bits)
-{
-}
static inline void
tlb_flush_page_bits_by_mmuidx_all_cpus_synced(CPUState *cpu, vaddr addr,
uint16_t idxmap, unsigned bits)
@@ -353,13 +293,6 @@ static inline void tlb_flush_range_by_mmuidx(CPUState *cpu, vaddr addr,
unsigned bits)
{
}
-static inline void tlb_flush_range_by_mmuidx_all_cpus(CPUState *cpu,
- vaddr addr,
- vaddr len,
- uint16_t idxmap,
- unsigned bits)
-{
-}
static inline void tlb_flush_range_by_mmuidx_all_cpus_synced(CPUState *cpu,
vaddr addr,
vaddr len,