Diffstat (limited to 'llvm/test/CodeGen/BPF')
-rw-r--r--  llvm/test/CodeGen/BPF/bpf_trap.ll                32
-rw-r--r--  llvm/test/CodeGen/BPF/jump_table_blockaddr.ll     4
-rw-r--r--  llvm/test/CodeGen/BPF/jump_table_global_var.ll    4
-rw-r--r--  llvm/test/CodeGen/BPF/jump_table_switch_stmt.ll  60
-rw-r--r--  llvm/test/CodeGen/BPF/unaligned_load_store.ll   196
5 files changed, 262 insertions, 34 deletions
diff --git a/llvm/test/CodeGen/BPF/bpf_trap.ll b/llvm/test/CodeGen/BPF/bpf_trap.ll
new file mode 100644
index 0000000..ab8df5f
--- /dev/null
+++ b/llvm/test/CodeGen/BPF/bpf_trap.ll
@@ -0,0 +1,32 @@
+; RUN: llc < %s | FileCheck %s
+;
+target triple = "bpf"
+
+define i32 @test(i8 %x) {
+entry:
+  %0 = and i8 %x, 3
+  switch i8 %0, label %default.unreachable4 [
+    i8 0, label %return
+    i8 1, label %sw.bb1
+    i8 2, label %sw.bb2
+    i8 3, label %sw.bb3
+  ]
+
+sw.bb1:                                           ; preds = %entry
+  br label %return
+
+sw.bb2:                                           ; preds = %entry
+  br label %return
+
+sw.bb3:                                           ; preds = %entry
+  br label %return
+
+default.unreachable4:                             ; preds = %entry
+  unreachable
+
+return:                                           ; preds = %entry, %sw.bb3, %sw.bb2, %sw.bb1
+  %retval.0 = phi i32 [ 12, %sw.bb1 ], [ 43, %sw.bb2 ], [ 54, %sw.bb3 ], [ 32, %entry ]
+  ret i32 %retval.0
+}
+
+; CHECK-NOT: __bpf_trap
diff --git a/llvm/test/CodeGen/BPF/jump_table_blockaddr.ll b/llvm/test/CodeGen/BPF/jump_table_blockaddr.ll
index d5a1d63..b7d5186 100644
--- a/llvm/test/CodeGen/BPF/jump_table_blockaddr.ll
+++ b/llvm/test/CodeGen/BPF/jump_table_blockaddr.ll
@@ -84,8 +84,8 @@ llc -march=bpf -mcpu=v4 < test.ll \
 ; CHECK: .cfi_endproc
 ; CHECK: .section .jumptables,"",@progbits
 ; CHECK: BPF.JT.0.0:
-; CHECK: .quad LBB0_3
+; CHECK: .quad LBB0_3-.text
 ; CHECK: .size BPF.JT.0.0, 8
 ; CHECK: BPF.JT.0.1:
-; CHECK: .quad LBB0_4
+; CHECK: .quad LBB0_4-.text
 ; CHECK: .size BPF.JT.0.1, 8
diff --git a/llvm/test/CodeGen/BPF/jump_table_global_var.ll b/llvm/test/CodeGen/BPF/jump_table_global_var.ll
index bbca468..71c682f 100644
--- a/llvm/test/CodeGen/BPF/jump_table_global_var.ll
+++ b/llvm/test/CodeGen/BPF/jump_table_global_var.ll
@@ -78,6 +78,6 @@ llc -march=bpf -mcpu=v4 < test.ll \
 ; CHECK: .cfi_endproc
 ; CHECK: .section .jumptables,"",@progbits
 ; CHECK: BPF.JT.0.0:
-; CHECK: .quad LBB0_1
-; CHECK: .quad LBB0_2
+; CHECK: .quad LBB0_1-.text
+; CHECK: .quad LBB0_2-.text
 ; CHECK: .size BPF.JT.0.0, 16
diff --git a/llvm/test/CodeGen/BPF/jump_table_switch_stmt.ll b/llvm/test/CodeGen/BPF/jump_table_switch_stmt.ll
index 682b025..eb1e5bf 100644
--- a/llvm/test/CodeGen/BPF/jump_table_switch_stmt.ll
+++ b/llvm/test/CodeGen/BPF/jump_table_switch_stmt.ll
@@ -93,34 +93,34 @@ llc -march=bpf -mcpu=v4 -bpf-min-jump-table-entries=3 < test.ll \
 ; CHECK: .cfi_endproc
 ; CHECK: .section .jumptables,"",@progbits
 ; CHECK: BPF.JT.0.0:
-; CHECK: .quad LBB0_4
-; CHECK: .quad LBB0_5
-; CHECK: .quad LBB0_5
-; CHECK: .quad LBB0_5
-; CHECK: .quad LBB0_5
-; CHECK: .quad LBB0_5
-; CHECK: .quad LBB0_5
-; CHECK: .quad LBB0_5
-; CHECK: .quad LBB0_5
-; CHECK: .quad LBB0_5
-; CHECK: .quad LBB0_5
-; CHECK: .quad LBB0_5
-; CHECK: .quad LBB0_5
-; CHECK: .quad LBB0_5
-; CHECK: .quad LBB0_5
-; CHECK: .quad LBB0_5
-; CHECK: .quad LBB0_5
-; CHECK: .quad LBB0_5
-; CHECK: .quad LBB0_5
-; CHECK: .quad LBB0_2
-; CHECK: .quad LBB0_5
-; CHECK: .quad LBB0_5
-; CHECK: .quad LBB0_5
-; CHECK: .quad LBB0_5
-; CHECK: .quad LBB0_5
-; CHECK: .quad LBB0_5
-; CHECK: .quad LBB0_5
-; CHECK: .quad LBB0_5
-; CHECK: .quad LBB0_5
-; CHECK: .quad LBB0_3
+; CHECK: .quad LBB0_4-.text
+; CHECK: .quad LBB0_5-.text
+; CHECK: .quad LBB0_5-.text
+; CHECK: .quad LBB0_5-.text
+; CHECK: .quad LBB0_5-.text
+; CHECK: .quad LBB0_5-.text
+; CHECK: .quad LBB0_5-.text
+; CHECK: .quad LBB0_5-.text
+; CHECK: .quad LBB0_5-.text
+; CHECK: .quad LBB0_5-.text
+; CHECK: .quad LBB0_5-.text
+; CHECK: .quad LBB0_5-.text
+; CHECK: .quad LBB0_5-.text
+; CHECK: .quad LBB0_5-.text
+; CHECK: .quad LBB0_5-.text
+; CHECK: .quad LBB0_5-.text
+; CHECK: .quad LBB0_5-.text
+; CHECK: .quad LBB0_5-.text
+; CHECK: .quad LBB0_5-.text
+; CHECK: .quad LBB0_2-.text
+; CHECK: .quad LBB0_5-.text
+; CHECK: .quad LBB0_5-.text
+; CHECK: .quad LBB0_5-.text
+; CHECK: .quad LBB0_5-.text
+; CHECK: .quad LBB0_5-.text
+; CHECK: .quad LBB0_5-.text
+; CHECK: .quad LBB0_5-.text
+; CHECK: .quad LBB0_5-.text
+; CHECK: .quad LBB0_5-.text
+; CHECK: .quad LBB0_3-.text
 ; CHECK: .size BPF.JT.0.0, 240
diff --git a/llvm/test/CodeGen/BPF/unaligned_load_store.ll b/llvm/test/CodeGen/BPF/unaligned_load_store.ll
new file mode 100644
index 0000000..b302a80
--- /dev/null
+++ b/llvm/test/CodeGen/BPF/unaligned_load_store.ll
@@ -0,0 +1,196 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+
+; RUN: llc -mtriple=bpfel -mattr=+allows-misaligned-mem-access -verify-machineinstrs %s -o - \
+; RUN:   | FileCheck --check-prefixes=ALL,MISALIGN %s
+; RUN: llc -mtriple=bpfeb -mattr=+allows-misaligned-mem-access -verify-machineinstrs %s -o - \
+; RUN:   | FileCheck --check-prefixes=ALL,MISALIGN %s
+
+; RUN: llc -mtriple=bpfel -verify-machineinstrs %s -o - \
+; RUN:   | FileCheck --check-prefixes=ALL,ALIGN %s
+; RUN: llc -mtriple=bpfeb -verify-machineinstrs %s -o - \
+; RUN:   | FileCheck --check-prefixes=ALL,ALIGN %s
+; NOTE:
+; This test verifies that the new +bpf-allow-misaligned-mem-access
+; feature allows the BPF backend to emit direct unaligned load/store
+; instructions instead of byte-by-byte emulation sequences.
+
+; ---------------------------------------------------------------------
+; i8 load
+; ---------------------------------------------------------------------
+define i8 @test_load_i8(i8* %p) {
+; ALL-LABEL: test_load_i8:
+; ALL: # %bb.0:
+; ALL-NEXT: w{{[0-9]+}} = *(u8 *)(r1 + 0)
+; ALL-NEXT: exit
+  %v = load i8, i8* %p, align 1
+  ret i8 %v
+}
+
+; ---------------------------------------------------------------------
+; i8 store
+; ---------------------------------------------------------------------
+define void @test_store_i8(i8* %p, i8 %v) {
+; ALL-LABEL: test_store_i8:
+; ALL: # %bb.0:
+; ALL-NEXT: *(u8 *)(r1 + 0) = w{{[0-9]+}}
+; ALL-NEXT: exit
+  store i8 %v, i8* %p, align 1
+  ret void
+}
+
+; ---------------------------------------------------------------------
+; i16 load
+; ---------------------------------------------------------------------
+define i16 @test_load_i16(i16* %p) {
+; MISALIGN-LABEL: test_load_i16:
+; MISALIGN: # %bb.0:
+; MISALIGN: w{{[0-9]+}} = *(u16 *)(r1 + 0)
+; MISALIGN: exit
+;
+; ALIGN-LABEL: test_load_i16:
+; ALIGN: # %bb.0:
+; ALIGN-DAG: w{{[0-9]+}} = *(u8 *)(r1 + 0)
+; ALIGN-DAG: w{{[0-9]+}} = *(u8 *)(r1 + 1)
+; ALIGN-DAG: w{{[0-9]+}} <<= 8
+; ALIGN-DAG: w{{[0-9]+}} |= w{{[0-9]+}}
+; ALIGN: exit
+  %v = load i16, i16* %p, align 1
+  ret i16 %v
+}
+
+; ---------------------------------------------------------------------
+; i16 store
+; ---------------------------------------------------------------------
+define void @test_store_i16(i16* %p, i16 %v) {
+; MISALIGN-LABEL: test_store_i16:
+; MISALIGN: # %bb.0:
+; MISALIGN: *(u16 *)(r1 + 0) = w{{[0-9]+}}
+; MISALIGN: exit
+;
+; ALIGN-LABEL: test_store_i16:
+; ALIGN: # %bb.0:
+; ALIGN-DAG: *(u8 *)(r1 + 0) = w{{[0-9]+}}
+; ALIGN-DAG: w{{[0-9]+}} >>= 8
+; ALIGN-DAG: *(u8 *)(r1 + 1) = w{{[0-9]+}}
+; ALIGN: exit
+  store i16 %v, i16* %p, align 1
+  ret void
+}
+
+; ---------------------------------------------------------------------
+; i32 load
+; ---------------------------------------------------------------------
+
+define i32 @test_load_i32(i32* %p) {
+; MISALIGN-LABEL: test_load_i32:
+; MISALIGN: # %bb.0:
+; MISALIGN: w{{[0-9]+}} = *(u32 *)(r1 + 0)
+; MISALIGN: exit
+;
+; ALIGN-LABEL: test_load_i32:
+; ALIGN: # %bb.0:
+; ALIGN-DAG: w{{[0-9]+}} = *(u8 *)(r1 + 0)
+; ALIGN-DAG: w{{[0-9]+}} <<= 8
+; ALIGN-DAG: w{{[0-9]+}} = *(u8 *)(r1 + 1)
+; ALIGN-DAG: w{{[0-9]+}} |= w{{[0-9]+}}
+; ALIGN-DAG: w{{[0-9]+}} = *(u8 *)(r1 + 2)
+; ALIGN-DAG: w{{[0-9]+}} <<= 16
+; ALIGN-DAG: w{{[0-9]+}} = *(u8 *)(r1 + 3)
+; ALIGN-DAG: w{{[0-9]+}} <<= 24
+; ALIGN: exit
+  %v = load i32, i32* %p, align 1
+  ret i32 %v
+}
+
+; ---------------------------------------------------------------------
+; i32 store
+; ---------------------------------------------------------------------
+
+define void @test_store_i32(i32* %p, i32 %v) {
+; MISALIGN-LABEL: test_store_i32:
+; MISALIGN: # %bb.0:
+; MISALIGN: *(u32 *)(r1 + 0) = w{{[0-9]+}}
+; MISALIGN: exit
+;
+; ALIGN-LABEL: test_store_i32:
+; ALIGN: # %bb.0:
+; ALIGN-DAG: w{{[0-9]+}} = w{{[0-9]+}}
+; ALIGN-DAG: w{{[0-9]+}} >>= 24
+; ALIGN-DAG: *(u8 *)(r1 + 0) = w{{[0-9]+}}
+; ALIGN-DAG: w{{[0-9]+}} = w{{[0-9]+}}
+; ALIGN-DAG: w{{[0-9]+}} >>= 16
+; ALIGN-DAG: *(u8 *)(r1 + 1) = w{{[0-9]+}}
+; ALIGN-DAG: *(u8 *)(r1 + 2) = w{{[0-9]+}}
+; ALIGN-DAG: w{{[0-9]+}} >>= 8
+; ALIGN-DAG: *(u8 *)(r1 + 3) = w{{[0-9]+}}
+; ALIGN: exit
+  store i32 %v, i32* %p, align 1
+  ret void
+}
+
+; ---------------------------------------------------------------------
+; i64 load
+; ---------------------------------------------------------------------
+
+define i64 @test_load_i64(i64* %p) {
+; MISALIGN-LABEL: test_load_i64:
+; MISALIGN: # %bb.0:
+; MISALIGN: r0 = *(u64 *)(r1 + 0)
+; MISALIGN: exit
+;
+; ALIGN-LABEL: test_load_i64:
+; ALIGN: # %bb.0:
+; ALIGN-DAG: w{{[0-9]+}} = *(u8 *)(r1 + 0)
+; ALIGN-DAG: w{{[0-9]+}} = *(u8 *)(r1 + 1)
+; ALIGN-DAG: r{{[0-9]+}} <<= 8
+; ALIGN-DAG: r{{[0-9]+}} |= r{{[0-9]+}}
+; ALIGN-DAG: w{{[0-9]+}} = *(u8 *)(r1 + 2)
+; ALIGN-DAG: r{{[0-9]+}} <<= 16
+; ALIGN-DAG: w{{[0-9]+}} = *(u8 *)(r1 + 3)
+; ALIGN-DAG: r{{[0-9]+}} <<= 24
+; ALIGN-DAG: w{{[0-9]+}} = *(u8 *)(r1 + 4)
+; ALIGN-DAG: w{{[0-9]+}} <<= 8
+; ALIGN-DAG: w{{[0-9]+}} = *(u8 *)(r1 + 5)
+; ALIGN-DAG: w{{[0-9]+}} |= w{{[0-9]+}}
+; ALIGN-DAG: w{{[0-9]+}} = *(u8 *)(r1 + 6)
+; ALIGN-DAG: w{{[0-9]+}} <<= 16
+; ALIGN-DAG: w{{[0-9]+}} = *(u8 *)(r1 + 7)
+; ALIGN-DAG: w{{[0-9]+}} <<= 24
+; ALIGN-DAG: r{{[0-9]+}} <<= 32
+; ALIGN: exit
+  %v = load i64, i64* %p, align 1
+  ret i64 %v
+}
+
+; ---------------------------------------------------------------------
+; i64 store
+; ---------------------------------------------------------------------
+
+define void @test_store_i64(i64* %p, i64 %v) {
+; MISALIGN-LABEL: test_store_i64:
+; MISALIGN: # %bb.0:
+; MISALIGN: *(u64 *)(r1 + 0) = r2
+; MISALIGN: exit
+;
+; ALIGN-LABEL: test_store_i64:
+; ALIGN: # %bb.0:
+; ALIGN-DAG: *(u8 *)(r1 + 0) = w{{[0-9]+}}
+; ALIGN-DAG: r{{[0-9]+}} = r{{[0-9]+}}
+; ALIGN-DAG: r{{[0-9]+}} >>= 56
+; ALIGN-DAG: *(u8 *)(r1 + 1) = w{{[0-9]+}}
+; ALIGN-DAG: r{{[0-9]+}} >>= 48
+; ALIGN-DAG: *(u8 *)(r1 + 2) = w{{[0-9]+}}
+; ALIGN-DAG: r{{[0-9]+}} >>= 40
+; ALIGN-DAG: *(u8 *)(r1 + 3) = w{{[0-9]+}}
+; ALIGN-DAG: r{{[0-9]+}} >>= 32
+; ALIGN-DAG: *(u8 *)(r1 + 4) = w{{[0-9]+}}
+; ALIGN-DAG: r{{[0-9]+}} >>= 24
+; ALIGN-DAG: *(u8 *)(r1 + 5) = w{{[0-9]+}}
+; ALIGN-DAG: r{{[0-9]+}} >>= 16
+; ALIGN-DAG: *(u8 *)(r1 + 6) = w{{[0-9]+}}
+; ALIGN-DAG: r{{[0-9]+}} >>= 8
+; ALIGN-DAG: *(u8 *)(r1 + 7) = w{{[0-9]+}}
+; ALIGN: exit
+  store i64 %v, i64* %p, align 1
+  ret void
+}
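Background on the jump-table CHECK updates above: the entries in BPF.JT.0.0/BPF.JT.0.1 are now matched as label-minus-.text differences (for example LBB0_3-.text), i.e. offsets into the text section, instead of absolute LBB labels. The C fragment below is only a hypothetical illustration of where such tables come from (jt_example.c, dispatch and the case values are made up and not part of this change); a switch of this shape is the kind of control flow the BPF backend can lower through a table in the .jumptables section at -mcpu=v4, with the exact threshold tunable via the -bpf-min-jump-table-entries option used in the test's RUN line.

/* jt_example.c -- hypothetical illustration, not part of this commit. */
int dispatch(unsigned int x)
{
    /* Case indices spanning 0..29 can be lowered through a table of
     * 8-byte entries (30 * 8 = 240 bytes, matching the
     * ".size BPF.JT.0.0, 240" check above); unlisted indices fall
     * through to the shared default block. */
    switch (x) {
    case 0:  return 1;
    case 19: return 2;
    case 29: return 3;
    default: return 0;
    }
}

Emitting each entry relative to .text rather than as an absolute label address is what the updated CHECK lines encode; the table contents become section-relative offsets.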
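For context on the new unaligned_load_store.ll test: one common way to end up with the align-1 loads and stores it exercises is a packed struct access in C. The snippet below is a sketch only (unaligned_example.c, hdr and read_value are invented names, not part of this change); compiled with something like clang --target=bpf -O2 -S, the 4-byte field access becomes an i32 load with alignment 1, which the backend either keeps as a single *(u32 *) access when the allows-misaligned-mem-access feature is enabled (the MISALIGN checks) or expands into u8 loads plus shifts and ors (the ALIGN checks).

/* unaligned_example.c -- hypothetical illustration, not part of this commit. */
struct __attribute__((packed)) hdr {
    unsigned char tag;    /* offset 0 */
    unsigned int  value;  /* offset 1: 4-byte field at alignment 1 */
};

unsigned int read_value(const struct hdr *h)
{
    /* This access is lowered to a 4-byte load with align 1. Without the
     * allows-misaligned-mem-access target feature the BPF backend
     * emulates it byte by byte, as the ALIGN check lines expect. */
    return h->value;
}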
