author    Yonghong Song <yhs@fb.com>  2017-12-03 19:02:03 +0000
committer Yonghong Song <yhs@fb.com>  2017-12-03 19:02:03 +0000
commit    170dfab48d7741847180760e8ac42308e08ee2db (patch)
tree      65fb7bd6e8e8de24b0a48725ee743c1c30470fc4
parent    1254c70db9559d0c7cbcfa98d046120bd15f2a29 (diff)
bpf: fix bug on silently truncating 64-bit immediate
We came across an LLVM bug when compiling some testcases: 64-bit immediates were silently truncated to 32 bits and then packed into the BPF_JMP | BPF_K encoding, which caused comparisons against the wrong value. The bug appears to have been introduced by r308080 (LLVM 5.0). The Select_Ri pattern is supposed to be lowered into J*_Ri, but the latter only supports a 32-bit immediate encoding, so Select_Ri should apply the same immediate predicate check that J*_Ri does.

The bug was fixed upstream by git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@315889 91177308-0d34-0410-b5e6-96231b3b80d8 in LLVM 6.0. This patch is largely the same as that fix, with one minor adjustment to the test case.

Reported-by: John Fastabend <john.fastabend@gmail.com>
Reported-by: Jakub Kicinski <jakub.kicinski@netronome.com>
Signed-off-by: Jiong Wang <jiong.wang@netronome.com>
Reviewed-by: Yonghong Song <yhs@fb.com>

llvm-svn: 319633
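To make the failure mode concrete, below is a minimal standalone C++ sketch (an illustration only, not LLVM code; fitsInSignedImm32 is a hypothetical stand-in for llvm::isInt<32>). It shows how the constant used in the test case is changed by a silent 32-bit truncation, and how the range check added by this patch would flag it.

// A minimal standalone sketch of the bug described above: an out-of-range
// 64-bit immediate is silently truncated to 32 bits, so the emitted compare
// uses the wrong value. fitsInSignedImm32() is a hypothetical stand-in for
// llvm::isInt<32>(), the check added before building the J*_Ri instruction.
#include <cstdint>
#include <cstdio>
#include <limits>

static bool fitsInSignedImm32(int64_t V) {
  return V >= std::numeric_limits<int32_t>::min() &&
         V <= std::numeric_limits<int32_t>::max();
}

int main() {
  const int64_t Imm = 0x1ffffffffLL;                 // constant from the test case below
  const auto Truncated = static_cast<int32_t>(Imm);  // what a 32-bit BPF_K field would hold

  // Without the range check, the branch compares against Truncated (-1 on
  // typical targets) instead of 0x1ffffffff, so "sum != 0x1ffffffff" misfires.
  std::printf("64-bit immediate : %lld\n", static_cast<long long>(Imm));
  std::printf("truncated to 32  : %d\n", Truncated);
  std::printf("fits BPF_K imm32 : %s\n", fitsInSignedImm32(Imm) ? "yes" : "no");
  return 0;
}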
-rw-r--r--  llvm/lib/Target/BPF/BPFISelLowering.cpp  |  8
-rw-r--r--  llvm/lib/Target/BPF/BPFInstrInfo.td      |  2
-rw-r--r--  llvm/test/CodeGen/BPF/select_ri.ll       | 35
3 files changed, 42 insertions, 3 deletions
diff --git a/llvm/lib/Target/BPF/BPFISelLowering.cpp b/llvm/lib/Target/BPF/BPFISelLowering.cpp
index 81b0aa7..5740b49 100644
--- a/llvm/lib/Target/BPF/BPFISelLowering.cpp
+++ b/llvm/lib/Target/BPF/BPFISelLowering.cpp
@@ -578,11 +578,15 @@ BPFTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
.addReg(LHS)
.addReg(MI.getOperand(2).getReg())
.addMBB(Copy1MBB);
- else
+ else {
+ int64_t imm32 = MI.getOperand(2).getImm();
+ // sanity check before we build J*_ri instruction.
+ assert (isInt<32>(imm32));
BuildMI(BB, DL, TII.get(NewCC))
.addReg(LHS)
- .addImm(MI.getOperand(2).getImm())
+ .addImm(imm32)
.addMBB(Copy1MBB);
+ }
// Copy0MBB:
// %FalseValue = ...
diff --git a/llvm/lib/Target/BPF/BPFInstrInfo.td b/llvm/lib/Target/BPF/BPFInstrInfo.td
index f683578..59e92f8 100644
--- a/llvm/lib/Target/BPF/BPFInstrInfo.td
+++ b/llvm/lib/Target/BPF/BPFInstrInfo.td
@@ -464,7 +464,7 @@ let usesCustomInserter = 1 in {
(ins GPR:$lhs, i64imm:$rhs, i64imm:$imm, GPR:$src, GPR:$src2),
"# Select PSEUDO $dst = $lhs $imm $rhs ? $src : $src2",
[(set i64:$dst,
- (BPFselectcc i64:$lhs, (i64 imm:$rhs), (i64 imm:$imm), i64:$src, i64:$src2))]>;
+ (BPFselectcc i64:$lhs, (i64 i64immSExt32:$rhs), (i64 imm:$imm), i64:$src, i64:$src2))]>;
}
// load 64-bit global addr into register
diff --git a/llvm/test/CodeGen/BPF/select_ri.ll b/llvm/test/CodeGen/BPF/select_ri.ll
index c4ac376..3610d40 100644
--- a/llvm/test/CodeGen/BPF/select_ri.ll
+++ b/llvm/test/CodeGen/BPF/select_ri.ll
@@ -25,3 +25,38 @@ entry:
}
attributes #0 = { norecurse nounwind readonly }
+
+; test immediate out of 32-bit range
+; Source file:
+
+; unsigned long long
+; load_word(void *buf, unsigned long long off)
+; asm("llvm.bpf.load.word");
+;
+; int
+; foo(void *buf)
+; {
+; unsigned long long sum = 0;
+;
+; sum += load_word(buf, 100);
+; sum += load_word(buf, 104);
+;
+; if (sum != 0x1ffffffffULL)
+; return ~0U;
+;
+; return 0;
+;}
+
+; Function Attrs: nounwind readonly
+define i32 @foo(i8*) local_unnamed_addr #0 {
+ %2 = tail call i64 @llvm.bpf.load.word(i8* %0, i64 100)
+ %3 = tail call i64 @llvm.bpf.load.word(i8* %0, i64 104)
+ %4 = add i64 %3, %2
+ %5 = icmp ne i64 %4, 8589934591
+; CHECK: r{{[0-9]+}} = 8589934591ll
+ %6 = sext i1 %5 to i32
+ ret i32 %6
+}
+
+; Function Attrs: nounwind readonly
+declare i64 @llvm.bpf.load.word(i8*, i64) #1