author    Sanjay Patel <spatel@rotateright.com>    2020-02-20 09:22:56 -0500
committer Sanjay Patel <spatel@rotateright.com>    2020-02-20 09:33:05 -0500
commit    15e20dcb8f9decf1928871d562a3724e8cc1e343 (patch)
tree      4d58881b2ea9e59800de7d1f146a95af49b72339 /llvm/utils/UpdateTestChecks/asm.py
parent    977cd661cf019039dec7ffdd15bf0ac500828c87 (diff)
[Utils][x86] add an option to reduce scrubbing of shuffles with memops
I was drafting a patch that would increase broadcast load usage, but our
shuffle scrubbing makes it impossible to see if the memory operand offset was
getting created correctly.

I'm proposing to make that an option (defaulted to 'off' for now to reduce
regression test churn). The updated files provide examples of tests where we
can now verify that the pointer offset for a loaded memory operand is correct.
We still have stack and constant scrubbing that can obscure the operand even
if we don't scrub the entire instruction.

Differential Revision: https://reviews.llvm.org/D74775
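As an illustration only (not part of the patch): a minimal Python sketch of how the two scrubbing patterns treat a shuffle whose asm comment references a memory operand. The sample instructions and the 8(%rdi) offset are hypothetical.

import re

# Copies of the two patterns from asm.py (see the diff below).
SCRUB_X86_SHUFFLES_RE = re.compile(
    r'^(\s*\w+) [^#\n]+#+ ((?:[xyz]mm\d+|mem)( \{%k\d+\}( \{z\})?)? = .*)$',
    flags=re.M)
SCRUB_X86_SHUFFLES_NO_MEM_RE = re.compile(
    r'^(\s*\w+) [^#\n]+#+ ((?:[xyz]mm\d+|mem)( \{%k\d+\}( \{z\})?)? = (?!.*(?:mem)).*)$',
    flags=re.M)

# Hypothetical llc output: a shuffle that loads from 8(%rdi).
line = ' vpermilps $27, 8(%rdi), %xmm0 # xmm0 = mem[3,2,1,0]'

# Existing behavior: the operand list, including the offset, is hidden.
print(SCRUB_X86_SHUFFLES_RE.sub(r'\1 {{.*#+}} \2', line))
#  vpermilps {{.*#+}} xmm0 = mem[3,2,1,0]

# Reduced scrubbing: comments mentioning 'mem' are left untouched,
# so the 8(%rdi) offset stays visible in the generated CHECK line.
print(SCRUB_X86_SHUFFLES_NO_MEM_RE.sub(r'\1 {{.*#+}} \2', line))
#  vpermilps $27, 8(%rdi), %xmm0 # xmm0 = mem[3,2,1,0]

# Register-only shuffles are still scrubbed either way.
reg = ' vshufps $0, %xmm1, %xmm0, %xmm0 # xmm0 = xmm0[0,0],xmm1[0,0]'
print(SCRUB_X86_SHUFFLES_NO_MEM_RE.sub(r'\1 {{.*#+}} \2', reg))
#  vshufps {{.*#+}} xmm0 = xmm0[0,0],xmm1[0,0]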
Diffstat (limited to 'llvm/utils/UpdateTestChecks/asm.py')
-rw-r--r--    llvm/utils/UpdateTestChecks/asm.py    13
1 file changed, 12 insertions(+), 1 deletion(-)
diff --git a/llvm/utils/UpdateTestChecks/asm.py b/llvm/utils/UpdateTestChecks/asm.py
index 06f8a22..ea416ed 100644
--- a/llvm/utils/UpdateTestChecks/asm.py
+++ b/llvm/utils/UpdateTestChecks/asm.py
@@ -148,6 +148,12 @@ SCRUB_X86_SHUFFLES_RE = (
re.compile(
r'^(\s*\w+) [^#\n]+#+ ((?:[xyz]mm\d+|mem)( \{%k\d+\}( \{z\})?)? = .*)$',
flags=re.M))
+
+SCRUB_X86_SHUFFLES_NO_MEM_RE = (
+ re.compile(
+ r'^(\s*\w+) [^#\n]+#+ ((?:[xyz]mm\d+|mem)( \{%k\d+\}( \{z\})?)? = (?!.*(?:mem)).*)$',
+ flags=re.M))
+
SCRUB_X86_SPILL_RELOAD_RE = (
re.compile(
r'-?\d+\(%([er])[sb]p\)(.*(?:Spill|Reload))$',
@@ -163,8 +169,13 @@ def scrub_asm_x86(asm, args):
asm = common.SCRUB_WHITESPACE_RE.sub(r' ', asm)
# Expand the tabs used for indentation.
asm = string.expandtabs(asm, 2)
+
# Detect shuffle asm comments and hide the operands in favor of the comments.
- asm = SCRUB_X86_SHUFFLES_RE.sub(r'\1 {{.*#+}} \2', asm)
+  if getattr(args, 'x86_scrub_mem_shuffle', True):
+    asm = SCRUB_X86_SHUFFLES_RE.sub(r'\1 {{.*#+}} \2', asm)
+  else:
+    asm = SCRUB_X86_SHUFFLES_NO_MEM_RE.sub(r'\1 {{.*#+}} \2', asm)
+
# Detect stack spills and reloads and hide their exact offset and whether
# they used the stack pointer or frame pointer.
asm = SCRUB_X86_SPILL_RELOAD_RE.sub(r'{{[-0-9]+}}(%\1{{[sb]}}p)\2', asm)
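For reference, a minimal sketch of the new dispatch's default behavior, assuming the command-line option added elsewhere in this commit stores into an x86_scrub_mem_shuffle attribute on the parsed arguments (the Namespace objects below are stand-ins for the real args object):

from argparse import Namespace

def pick_shuffle_scrubber(args):
    # Mirrors the dispatch in scrub_asm_x86 above: a missing attribute (older
    # callers) or an explicit True keeps the pre-existing full scrubbing, so
    # regenerating tests without the new option produces no churn; only an
    # explicit False selects the reduced, memory-operand-preserving pattern.
    if getattr(args, 'x86_scrub_mem_shuffle', True):
        return 'SCRUB_X86_SHUFFLES_RE'
    return 'SCRUB_X86_SHUFFLES_NO_MEM_RE'

print(pick_shuffle_scrubber(Namespace()))                             # SCRUB_X86_SHUFFLES_RE
print(pick_shuffle_scrubber(Namespace(x86_scrub_mem_shuffle=True)))   # SCRUB_X86_SHUFFLES_RE
print(pick_shuffle_scrubber(Namespace(x86_scrub_mem_shuffle=False)))  # SCRUB_X86_SHUFFLES_NO_MEM_RE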